From 5c8b04d12a157430186d70ade94b0ca2959e5299 Mon Sep 17 00:00:00 2001 From: Allen Robel Date: Mon, 2 Mar 2026 13:53:49 -1000 Subject: [PATCH 001/109] [ignore] Add generic logger facility through the Log class and logging config --- plugins/module_utils/common/__init__.py | 0 plugins/module_utils/common/log.py | 465 +++++++++++ plugins/module_utils/logging_config.json | 36 + tests/__init__.py | 0 tests/config.yml | 3 + tests/unit/__init__.py | 0 tests/unit/module_utils/__init__.py | 0 tests/unit/test_log.py | 931 +++++++++++++++++++++++ 8 files changed, 1435 insertions(+) create mode 100644 plugins/module_utils/common/__init__.py create mode 100644 plugins/module_utils/common/log.py create mode 100644 plugins/module_utils/logging_config.json create mode 100644 tests/__init__.py create mode 100644 tests/config.yml create mode 100644 tests/unit/__init__.py create mode 100644 tests/unit/module_utils/__init__.py create mode 100644 tests/unit/test_log.py diff --git a/plugins/module_utils/common/__init__.py b/plugins/module_utils/common/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/plugins/module_utils/common/log.py b/plugins/module_utils/common/log.py new file mode 100644 index 00000000..29182539 --- /dev/null +++ b/plugins/module_utils/common/log.py @@ -0,0 +1,465 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import json +import logging +from enum import Enum +from logging.config import dictConfig +from os import environ +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from ansible.module_utils.basic import AnsibleModule + + +class ValidLogHandlers(str, Enum): + """Valid logging handler classes (must not log to console).""" + + FILE_HANDLER = 
"logging.FileHandler" + ROTATING_FILE_HANDLER = "logging.handlers.RotatingFileHandler" + TIMED_ROTATING_FILE_HANDLER = "logging.handlers.TimedRotatingFileHandler" + WATCHED_FILE_HANDLER = "logging.handlers.WatchedFileHandler" + + +class Log: + """ + # Summary + + Create the base nd logging object. + + ## Raises + + - `ValueError` if: + - An error is encountered reading the logging config file. + - An error is encountered parsing the logging config file. + - An invalid handler is found in the logging config file. + - Valid handlers are defined in `ValidLogHandlers`. + - No formatters are found in the logging config file that + are associated with the configured handlers. + - `TypeError` if: + - `develop` is not a boolean. + + ## Usage + + By default, Log() does the following: + + 1. Reads the environment variable `ND_LOGGING_CONFIG` to determine + the path to the logging config file. If the environment variable is + not set, then logging is disabled. + 2. Sets `develop` to False. This disables exceptions raised by the + logging module itself. + + Hence, the simplest usage for Log() is: + + - Set the environment variable `ND_LOGGING_CONFIG` to the + path of the logging config file. `bash` shell is used in the + example below. + + ```bash + export ND_LOGGING_CONFIG="/path/to/logging_config.json" + ``` + + - Instantiate a Log() object instance and call `commit()` on the instance: + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log + try: + log = Log() + log.commit() + except ValueError as error: + # handle error + ``` + + To later disable logging, unset the environment variable. + `bash` shell is used in the example below. 
+ + ```bash + unset ND_LOGGING_CONFIG + ``` + + To enable exceptions from the logging module (not recommended, unless needed for + development), set `develop` to True: + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log + try: + log = Log() + log.develop = True + log.commit() + except ValueError as error: + # handle error + ``` + + To directly set the path to the logging config file, overriding the + `ND_LOGGING_CONFIG` environment variable, set the `config` + property prior to calling `commit()`: + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log + try: + log = Log() + log.config = "/path/to/logging_config.json" + log.commit() + except ValueError as error: + # handle error + ``` + + At this point, a base/parent logger is created for which all other + loggers throughout the nd collection will be children. + This allows for a single logging config to be used for all modules in the + collection, and allows for the logging config to be specified in a + single place external to the code. + + ## Example module code using the Log() object + + The `setup_logging()` helper is the recommended way to configure logging in module `main()` functions. + It handles exceptions internally by calling `module.fail_json()`. + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.common.log import setup_logging + + def main(): + module = AnsibleModule(...) 
+ log = setup_logging(module) + + task = AnsibleTask() + ``` + + To enable logging exceptions during development, pass `develop=True`: + + ```python + log = setup_logging(module, develop=True) + ``` + + Alternatively, `Log()` can be used directly when finer control is needed: + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log + + def main(): + try: + log = Log() + log.commit() + except ValueError as error: + ansible_module.fail_json(msg=str(error)) + + task = AnsibleTask() + ``` + + In the AnsibleTask() class (or any other classes running in the + main() function's call stack i.e. classes instantiated in either + main() or in AnsibleTask()). + + ```python + class AnsibleTask: + def __init__(self): + self.class_name = self.__class__.__name__ + self.log = logging.getLogger(f"nd.{self.class_name}") + def some_method(self): + self.log.debug("This is a debug message.") + ``` + + ## Logging Config File + + The logging config file MUST conform to `logging.config.dictConfig` + from Python's standard library and MUST NOT contain any handlers or + that log to stdout or stderr. The logging config file MUST only + contain handlers that log to files. 
+ + An example logging config file is shown below: + + ```json + { + "version": 1, + "formatters": { + "standard": { + "class": "logging.Formatter", + "format": "%(asctime)s - %(levelname)s - [%(name)s.%(funcName)s.%(lineno)d] %(message)s" + } + }, + "handlers": { + "file": { + "class": "logging.handlers.RotatingFileHandler", + "formatter": "standard", + "level": "DEBUG", + "filename": "/tmp/nd.log", + "mode": "a", + "encoding": "utf-8", + "maxBytes": 50000000, + "backupCount": 4 + } + }, + "loggers": { + "nd": { + "handlers": [ + "file" + ], + "level": "DEBUG", + "propagate": false + } + }, + "root": { + "level": "INFO", + "handlers": [ + "file" + ] + } + } + ``` + """ + + def __init__(self, config: Optional[str] = None, develop: bool = False): + self.class_name = self.__class__.__name__ + # Disable exceptions raised by the logging module. + # Set this to True during development to catch logging errors. + logging.raiseExceptions = False + + self._config: Optional[str] = environ.get("ND_LOGGING_CONFIG", None) + self._develop: bool = False + if config is not None: + self.config = config + self.develop = develop + + def disable_logging(self) -> None: + """ + # Summary + + Disable logging by removing all handlers from the base logger. + + ## Raises + + None + """ + logger = logging.getLogger() + for handler in logger.handlers.copy(): + try: + logger.removeHandler(handler) + except ValueError: # if handler already removed + pass + logger.addHandler(logging.NullHandler()) + logger.propagate = False + + def enable_logging(self) -> None: + """ + # Summary + + Enable logging by reading the logging config file and configuring + the base logger instance. + + ## Raises + - `ValueError` if: + - An error is encountered reading the logging config file. 
+ """ + if self.config is None or self.config.strip() == "": + return + + try: + with open(self.config, "r", encoding="utf-8") as file: + try: + logging_config = json.load(file) + except json.JSONDecodeError as error: + msg = f"error parsing logging config from {self.config}. " + msg += f"Error detail: {error}" + raise ValueError(msg) from error + except IOError as error: + msg = f"error reading logging config from {self.config}. " + msg += f"Error detail: {error}" + raise ValueError(msg) from error + + try: + self._validate_logging_config(logging_config) + except ValueError as error: + raise ValueError(str(error)) from error + + try: + dictConfig(logging_config) + except (RuntimeError, TypeError, ValueError) as error: + msg = "logging.config.dictConfig: " + msg += f"Unable to configure logging from {self.config}. " + msg += f"Error detail: {error}" + raise ValueError(msg) from error + + def _validate_logging_config(self, logging_config: dict) -> None: + """ + # Summary + + - Validate the logging config file. + - Ensure that the logging config file does not contain any handlers + that log to console, stdout, or stderr. + + ## Raises + + - `ValueError` if: + - The logging config file contains no handlers. + - Any handler's `class` property is not one of the classes + defined in `ValidLogHandlers`. + + ## Usage + + ```python + log = Log() + log.config = "/path/to/logging_config.json" + log.commit() + ``` + """ + msg = "" + if len(logging_config.get("handlers", {})) == 0: + msg = "logging.config.dictConfig: " + msg += "No file handlers found. 
" + msg += "Add a file handler to the logging config file " + msg += f"and try again: {self.config}" + raise ValueError(msg) + bad_handlers = [] + for handler_name, handler_config in logging_config.get("handlers", {}).items(): + handler_class = handler_config.get("class", "") + if handler_class not in set(ValidLogHandlers): + msg = "logging.config.dictConfig: " + msg += "handlers found that may interrupt Ansible module " + msg += "execution. " + msg += "Remove these handlers from the logging config file " + msg += "and try again. " + bad_handlers.append(handler_name) + if len(bad_handlers) > 0: + msg += f"Handlers: {','.join(bad_handlers)}. " + msg += f"Logging config file: {self.config}." + raise ValueError(msg) + + def commit(self) -> None: + """ + # Summary + + - If `config` is None, disable logging. + - If `config` is a JSON file conformant with + `logging.config.dictConfig` from Python's standard library, read the file and configure the + base logger instance from the file's contents. + + ## Raises + + - `ValueError` if: + - An error is encountered reading the logging config file. + + ## Notes + + 1. If self.config is None, then logging is disabled. + 2. If self.config is a path to a JSON file, then the file is read + and logging is configured from the file. + + ## Usage + + ```python + log = Log() + log.config = "/path/to/logging_config.json" + log.commit() + ``` + """ + if self.config is None: + self.disable_logging() + else: + self.enable_logging() + + @property + def config(self) -> Optional[str]: + """ + ## Summary + + Path to a JSON file from which logging config is read. + JSON file must conform to `logging.config.dictConfig` from Python's + standard library. + + ## Default + + If the environment variable `ND_LOGGING_CONFIG` is set, then + the value of that variable is used. Otherwise, None. + + The environment variable can be overridden by directly setting + `config` to one of the following prior to calling `commit()`: + + 1. None. 
Logging will be disabled. + 2. Path to a JSON file from which logging config is read. + Must conform to `logging.config.dictConfig` from Python's + standard library. + """ + return self._config + + @config.setter + def config(self, value: Optional[str]) -> None: + self._config = value + + @property + def develop(self) -> bool: + """ + # Summary + + Disable or enable exceptions raised by the logging module. + + ## Default + + `False` + + ## Valid Values + + - `True`: Exceptions will be raised by the logging module. + - `False`: Exceptions will not be raised by the logging module. + """ + return self._develop + + @develop.setter + def develop(self, value: bool) -> None: + method_name = "develop" + if not isinstance(value, bool): + msg = f"{self.class_name}.{method_name}: Expected boolean for develop. " + msg += f"Got: type {type(value).__name__} for value {value}." + raise TypeError(msg) + self._develop = value + logging.raiseExceptions = value + + +def setup_logging(module: "AnsibleModule", develop: bool = False) -> Log: + """ + # Summary + + Configure nd collection logging and return the `Log` instance. + + Intended for use in each Ansible module's `main()` function after + `AnsibleModule` is instantiated. + + ## Raises + + None + + ## Notes + + - Calls `module.fail_json()` if logging configuration fails, which + exits the module with an error message rather than raising an exception. + + ## Usage + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.common.log import setup_logging + + def main(): + module = AnsibleModule(...) 
+ log = setup_logging(module) + ``` + + To enable logging exceptions during development, pass `develop=True`: + + ```python + log = setup_logging(module, develop=True) + ``` + """ + try: + log = Log(develop=develop) + log.commit() + except ValueError as error: + module.fail_json(msg=str(error)) + return log diff --git a/plugins/module_utils/logging_config.json b/plugins/module_utils/logging_config.json new file mode 100644 index 00000000..e87ddf05 --- /dev/null +++ b/plugins/module_utils/logging_config.json @@ -0,0 +1,36 @@ +{ + "version": 1, + "formatters": { + "standard": { + "class": "logging.Formatter", + "format": "%(asctime)s - %(levelname)s - [%(name)s.%(funcName)s.%(lineno)d] %(message)s" + } + }, + "handlers": { + "file": { + "class": "logging.handlers.RotatingFileHandler", + "formatter": "standard", + "level": "DEBUG", + "filename": "/tmp/nd.log", + "mode": "a", + "encoding": "utf-8", + "maxBytes": 50000000, + "backupCount": 4 + } + }, + "loggers": { + "nd": { + "handlers": [ + "file" + ], + "level": "DEBUG", + "propagate": false + } + }, + "root": { + "level": "INFO", + "handlers": [ + "file" + ] + } +} \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/config.yml b/tests/config.yml new file mode 100644 index 00000000..7cf024ab --- /dev/null +++ b/tests/config.yml @@ -0,0 +1,3 @@ +modules: + # Limit Python version to control node Python versions + python_requires: controller diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/module_utils/__init__.py b/tests/unit/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/test_log.py b/tests/unit/test_log.py new file mode 100644 index 00000000..96c11346 --- /dev/null +++ b/tests/unit/test_log.py @@ -0,0 +1,931 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General 
Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for plugins/module_utils/log.py +""" + +# See the following regarding *_fixture imports +# https://pylint.pycqa.org/en/latest/user_guide/messages/warning/redefined-outer-name.html +# Due to the above, we also need to disable unused-import +# pylint: disable=unused-import +# Some fixtures need to use *args to match the signature of the function they are mocking +# pylint: disable=unused-argument +# Some tests require calling protected methods +# pylint: disable=protected-access +# pylint: disable=unused-variable +# pylint: disable=line-too-long +# pylint: disable=too-many-lines + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import inspect +import json +import logging +from unittest.mock import MagicMock + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log, setup_logging +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import does_not_raise + + +def logging_config(logging_config_file) -> dict: + """ + ### Summary + Return a logging configuration conformant with logging.config.dictConfig. 
+ """ + return { + "version": 1, + "formatters": { + "standard": { + "class": "logging.Formatter", + "format": "%(asctime)s - %(levelname)s - [%(name)s.%(funcName)s.%(lineno)d] %(message)s", + } + }, + "handlers": { + "file": { + "class": "logging.handlers.RotatingFileHandler", + "formatter": "standard", + "level": "DEBUG", + "filename": logging_config_file, + "mode": "a", + "encoding": "utf-8", + "maxBytes": 500000, + "backupCount": 4, + } + }, + "loggers": {"nd": {"handlers": ["file"], "level": "DEBUG", "propagate": False}}, + "root": {"level": "INFO", "handlers": ["file"]}, + } + + +def test_log_00000(monkeypatch) -> None: + """ + # Summary + + Verify default state of `Log()` when `ND_LOGGING_CONFIG` is not set. + + ## Test + + - `ND_LOGGING_CONFIG` is not set. + - `instance.config` is `None`. + - `instance.develop` is `False`. + - `logging.raiseExceptions` is `False`. + + ## Classes and Methods + + - `Log.__init__()` + """ + monkeypatch.delenv("ND_LOGGING_CONFIG", raising=False) + + with does_not_raise(): + instance = Log() + + assert instance.config is None + assert instance.develop is False + assert logging.raiseExceptions is False + + +def test_log_00010(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify Log().commit() happy path. log. logs to the logfile and the log message contains the calling method's name. + + ## Test + + - Log().commit() is called with a valid logging config. + - log.info(), log.debug(), log.warning(), log.critical() all write to the logfile. + - The log message contains the calling method's name. 
+ + ## Classes and Methods + + - Log().commit() + """ + method_name = inspect.stack()[0][3] + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + instance.commit() + + info_msg = "foo" + debug_msg = "bing" + warning_msg = "bar" + critical_msg = "baz" + log = logging.getLogger("nd.test_logger") + log.info(info_msg) + log.debug(debug_msg) + log.warning(warning_msg) + log.critical(critical_msg) + assert logging.getLevelName(log.getEffectiveLevel()) == "DEBUG" + assert info_msg in log_file.read_text(encoding="UTF-8") + assert debug_msg in log_file.read_text(encoding="UTF-8") + assert warning_msg in log_file.read_text(encoding="UTF-8") + assert critical_msg in log_file.read_text(encoding="UTF-8") + # test that the log message includes the method name + assert method_name in log_file.read_text(encoding="UTF-8") + + +def test_log_00020(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `Log(config=...)` constructor parameter enables logging without setting `ND_LOGGING_CONFIG`. + + ## Test + + - `ND_LOGGING_CONFIG` is not set. + - A valid config path is passed directly to `Log(config=...)`. + - `commit()` succeeds and messages appear in the log file. 
+ + ## Classes and Methods + + - `Log.__init__()` + - `Log.commit()` + """ + monkeypatch.delenv("ND_LOGGING_CONFIG", raising=False) + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + with does_not_raise(): + instance = Log(config=str(config_file)) + instance.commit() + + msg = "hello_from_test_log_00020" + log = logging.getLogger("nd.test_log_00020") + log.info(msg) + assert msg in log_file.read_text(encoding="UTF-8") + + +def test_log_00030(monkeypatch) -> None: + """ + # Summary + + Verify `Log(develop=True)` constructor parameter sets `develop` and `logging.raiseExceptions`. + + ## Test + + - `Log(develop=True)` is instantiated. + - `instance.develop` is `True`. + - `logging.raiseExceptions` is `True`. + + ## Classes and Methods + + - `Log.__init__()` + """ + monkeypatch.delenv("ND_LOGGING_CONFIG", raising=False) + + with does_not_raise(): + instance = Log(develop=True) + + assert instance.develop is True + assert logging.raiseExceptions is True + + +def test_log_00100(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify nothing is logged when ND_LOGGING_CONFIG is not set. + + ## Test + + - ND_LOGGING_CONFIG is not set. + - Log().commit() succeeds. + - No logfile is created. 
+ + ## Classes and Methods + + - Log().commit() + """ + monkeypatch.delenv("ND_LOGGING_CONFIG", raising=False) + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + with does_not_raise(): + instance = Log() + instance.commit() + + info_msg = "foo" + debug_msg = "bing" + warning_msg = "bar" + critical_msg = "baz" + log = logging.getLogger("nd.test_logger") + log.info(info_msg) + log.debug(debug_msg) + log.warning(warning_msg) + log.critical(critical_msg) + # test that nothing was logged (file was not created) + with pytest.raises(FileNotFoundError): + log_file.read_text(encoding="UTF-8") + + +@pytest.mark.parametrize("env_var", [(""), (" ")]) +def test_log_00110(tmp_path, monkeypatch, env_var) -> None: + """ + # Summary + + Verify nothing is logged when ND_LOGGING_CONFIG is set to an empty string or whitespace. + + ## Test + + - ND_LOGGING_CONFIG is set to an empty string or whitespace. + - Log().commit() succeeds. + - No logfile is created. 
+ + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", env_var) + + with does_not_raise(): + instance = Log() + instance.commit() + + info_msg = "foo" + debug_msg = "bing" + warning_msg = "bar" + critical_msg = "baz" + log = logging.getLogger("nd.test_logger") + log.info(info_msg) + log.debug(debug_msg) + log.warning(warning_msg) + log.critical(critical_msg) + # test that nothing was logged (file was not created) + with pytest.raises(FileNotFoundError): + log_file.read_text(encoding="UTF-8") + + +def test_log_00120(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify nothing is logged when Log().config is set to None, overriding ND_LOGGING_CONFIG. + + ## Test Setup + + - ND_LOGGING_CONFIG is set to a file that exists, which would normally enable logging. + - Log().config is set to None, which overrides ND_LOGGING_CONFIG. + + ## Test + + - Nothing is logged because Log().config overrides ND_LOGGING_CONFIG. 
+ + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + instance.config = None + instance.commit() + + info_msg = "foo" + debug_msg = "bing" + warning_msg = "bar" + critical_msg = "baz" + log = logging.getLogger("nd.test_logger") + log.info(info_msg) + log.debug(debug_msg) + log.warning(warning_msg) + log.critical(critical_msg) + # test that nothing was logged (file was not created) + with pytest.raises(FileNotFoundError): + log_file.read_text(encoding="UTF-8") + + +def test_log_00130(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `instance.config` set to a file path overrides `ND_LOGGING_CONFIG`, logging to the new file. + + ## Test Setup + + - `ND_LOGGING_CONFIG` points to config A (log file A). + - `instance.config` is set to config B (log file B) after instantiation. + + ## Test + + - Messages appear in log file B, not log file A. 
+ + ## Classes and Methods + + - `Log.config` (setter) + - `Log.commit()` + """ + log_dir_a = tmp_path / "log_dir_a" + log_dir_a.mkdir() + config_file_a = log_dir_a / "logging_config_a.json" + log_file_a = log_dir_a / "nd_a.log" + config_a = logging_config(str(log_file_a)) + with open(config_file_a, "w", encoding="UTF-8") as fp: + json.dump(config_a, fp) + + log_dir_b = tmp_path / "log_dir_b" + log_dir_b.mkdir() + config_file_b = log_dir_b / "logging_config_b.json" + log_file_b = log_dir_b / "nd_b.log" + config_b = logging_config(str(log_file_b)) + with open(config_file_b, "w", encoding="UTF-8") as fp: + json.dump(config_b, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file_a)) + + with does_not_raise(): + instance = Log() + instance.config = str(config_file_b) + instance.commit() + + msg = "hello_from_test_log_00130" + log = logging.getLogger("nd.test_log_00130") + log.info(msg) + assert msg in log_file_b.read_text(encoding="UTF-8") + assert not log_file_a.exists() + + +def test_log_00200(monkeypatch) -> None: + """ + # Summary + + Verify `ValueError` is raised if logging config file does not exist. + + ## Classes and Methods + + - Log().commit() + """ + config_file = "DOES_NOT_EXIST.json" + monkeypatch.setenv("ND_LOGGING_CONFIG", config_file) + + with does_not_raise(): + instance = Log() + + match = rf"error reading logging config from {config_file}\.\s+" + match += r"Error detail:\s+\[Errno 2\]\s+No such file or directory:\s+" + match += rf"\'{config_file}\'" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_log_00210(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `ValueError` is raised if logging config file contains invalid JSON. 
+ + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump({"BAD": "JSON"}, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + + match = r"logging.config.dictConfig:\s+" + match += r"No file handlers found\.\s+" + match += r"Add a file handler to the logging config file\s+" + match += rf"and try again: {config_file}" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_log_00220(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `ValueError` is raised if logging config file does not contain JSON. + + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + with open(config_file, "w", encoding="UTF-8") as fp: + fp.write("NOT JSON") + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + + match = rf"error parsing logging config from {config_file}\.\s+" + match += r"Error detail: Expecting value: line 1 column 1 \(char 0\)" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_log_00230(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `ValueError` is raised if logging config file contains handler(s) that emit to non-file destinations. 
+ + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + config["handlers"]["console"] = { + "class": "logging.StreamHandler", + "formatter": "standard", + "level": "DEBUG", + "stream": "ext://sys.stdout", + } + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + + match = r"logging.config.dictConfig:\s+" + match += r"handlers found that may interrupt Ansible module\s+" + match += r"execution\.\s+" + match += r"Remove these handlers from the logging config file and\s+" + match += r"try again\.\s+" + match += r"Handlers:\s+.*\.\s+" + match += r"Logging config file:\s+.*\." + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_log_00231(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify no `ValueError` is raised when a handler uses a non-standard name but a valid handler class (e.g. `logging.handlers.RotatingFileHandler`). + + ## Test + + - Previously, validation checked the handler key name rather than the class, so `"my_file_handler"` would have been incorrectly rejected. + + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + # Rename the handler key from "file" to a non-standard name. 
+ config["handlers"]["my_file_handler"] = config["handlers"].pop("file") + config["loggers"]["nd"]["handlers"] = ["my_file_handler"] + config["root"]["handlers"] = ["my_file_handler"] + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + instance.commit() + + +def test_log_00232(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `ValueError` is raised when a handler is named `"file"` but its `class` property is `logging.StreamHandler`. + + ## Test + + - Previously, validation checked the handler key name rather than the class, so a `StreamHandler` named `"file"` would have been incorrectly accepted. + + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + # Keep the key name "file" but switch to a disallowed handler class. + config["handlers"]["file"]["class"] = "logging.StreamHandler" + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + + match = r"logging.config.dictConfig:\s+" + match += r"handlers found that may interrupt Ansible module\s+" + match += r"execution\.\s+" + match += r"Remove these handlers from the logging config file and\s+" + match += r"try again\.\s+" + match += r"Handlers:\s+.*\.\s+" + match += r"Logging config file:\s+.*\." + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_log_00233(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `commit()` does not raise when the handler class is `logging.FileHandler`. + + ## Test + + - Config uses `logging.FileHandler` (a valid handler class per `ValidLogHandlers`). + - `commit()` succeeds without raising. 
+ + ## Classes and Methods + + - `Log.commit()` + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + config["handlers"]["file"]["class"] = "logging.FileHandler" + del config["handlers"]["file"]["maxBytes"] + del config["handlers"]["file"]["backupCount"] + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + instance.commit() + + +def test_log_00234(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `commit()` does not raise when the handler class is `logging.handlers.TimedRotatingFileHandler`. + + ## Test + + - Config uses `logging.handlers.TimedRotatingFileHandler` (a valid handler class per `ValidLogHandlers`). + - `commit()` succeeds without raising. + + ## Classes and Methods + + - `Log.commit()` + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + config["handlers"]["file"]["class"] = "logging.handlers.TimedRotatingFileHandler" + config["handlers"]["file"]["when"] = "midnight" + del config["handlers"]["file"]["maxBytes"] + del config["handlers"]["file"]["mode"] + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + instance.commit() + + +def test_log_00235(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `commit()` does not raise when the handler class is `logging.handlers.WatchedFileHandler`. + + ## Test + + - Config uses `logging.handlers.WatchedFileHandler` (a valid handler class per `ValidLogHandlers`). + - `commit()` succeeds without raising. 
+ + ## Classes and Methods + + - `Log.commit()` + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + config["handlers"]["file"]["class"] = "logging.handlers.WatchedFileHandler" + del config["handlers"]["file"]["maxBytes"] + del config["handlers"]["file"]["backupCount"] + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + instance.commit() + + +def test_log_00240(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `ValueError` is raised if logging config file does not contain any handlers. + + ## Notes + + - `test_log_00210` raises the same error message in the case where the logging config file contains JSON that is not conformant with dictConfig. + + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + del config["handlers"] + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + + match = r"logging.config.dictConfig:\s+" + match += r"No file handlers found\.\s+" + match += r"Add a file handler to the logging config file\s+" + match += rf"and try again: {config_file}" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_log_00250(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `ValueError` is raised if logging config file does not contain any formatters or contains formatters that are not associated with handlers. 
+ + ## Classes and Methods + + - Log().commit() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + del config["formatters"] + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + with does_not_raise(): + instance = Log() + + match = r"logging.config.dictConfig:\s+" + match += r"Unable to configure logging from\s+.*\.\s+" + match += r"Error detail: Unable to configure handler.*" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_log_00300() -> None: + """ + # Summary + + Verify `TypeError` is raised if develop is set to a non-bool. + + ## Classes and Methods + + - Log().develop (setter) + """ + with does_not_raise(): + instance = Log() + + match = r"Log\.develop:\s+" + match += r"Expected boolean for develop\.\s+" + match += r"Got: type str for value FOO\." + with pytest.raises(TypeError, match=match): + instance.develop = "FOO" # type: ignore[assignment] + + +@pytest.mark.parametrize("develop", [(True), (False)]) +def test_log_00310(develop) -> None: + """ + # Summary + + Verify develop is set correctly if passed a bool and no exceptions are raised. + + ## Classes and Methods + + - Log().develop (setter) + """ + with does_not_raise(): + instance = Log() + instance.develop = develop + assert instance.develop == develop + + +@pytest.mark.parametrize("develop", [(True), (False)]) +def test_log_00320(develop) -> None: + """ + # Summary + + Verify `Log.develop` setter side effect: `logging.raiseExceptions` is updated to match `develop`. + + ## Test + + - `instance.develop` is set to `develop`. + - `instance.develop == develop`. + - `logging.raiseExceptions == develop`. 
+ + ## Classes and Methods + + - `Log.develop` (setter) + """ + with does_not_raise(): + instance = Log() + instance.develop = develop + assert instance.develop == develop + assert logging.raiseExceptions == develop + + +def test_setup_logging_00010(tmp_path, monkeypatch) -> None: + """ + # Summary + + Verify `setup_logging()` returns a `Log` instance when the config is valid. + + ## Test + + - `ND_LOGGING_CONFIG` points to a valid logging config file. + - `setup_logging()` returns a `Log` instance. + - `module.fail_json()` is not called. + + ## Classes and Methods + + - setup_logging() + """ + log_dir = tmp_path / "log_dir" + log_dir.mkdir() + config_file = log_dir / "logging_config.json" + log_file = log_dir / "nd.log" + config = logging_config(str(log_file)) + with open(config_file, "w", encoding="UTF-8") as fp: + json.dump(config, fp) + + monkeypatch.setenv("ND_LOGGING_CONFIG", str(config_file)) + + mock_module = MagicMock() + + with does_not_raise(): + result = setup_logging(mock_module) + + assert isinstance(result, Log) + mock_module.fail_json.assert_not_called() + + +@pytest.mark.parametrize("develop", [(True), (False)]) +def test_setup_logging_00040(monkeypatch, develop) -> None: + """ + # Summary + + Verify `setup_logging()` passes `develop` through to the `Log` instance. + + ## Test + + - `ND_LOGGING_CONFIG` is not set. + - `setup_logging()` is called with `develop` set to `True` or `False`. + - `result.develop` matches the value passed. + - `logging.raiseExceptions` matches the value passed. 
+ + ## Classes and Methods + + - `setup_logging()` + """ + monkeypatch.delenv("ND_LOGGING_CONFIG", raising=False) + + mock_module = MagicMock() + + with does_not_raise(): + result = setup_logging(mock_module, develop=develop) + + assert isinstance(result, Log) + assert result.develop is develop + assert logging.raiseExceptions is develop + mock_module.fail_json.assert_not_called() + + +def test_setup_logging_00020(monkeypatch) -> None: + """ + # Summary + + Verify `setup_logging()` calls `module.fail_json()` when the config file does not exist. + + ## Test + + - `ND_LOGGING_CONFIG` points to a nonexistent file. + - `setup_logging()` calls `module.fail_json()` with an error message describing the failure. + + ## Classes and Methods + + - setup_logging() + """ + config_file = "DOES_NOT_EXIST.json" + monkeypatch.setenv("ND_LOGGING_CONFIG", config_file) + + mock_module = MagicMock() + mock_module.fail_json.side_effect = SystemExit + + with pytest.raises(SystemExit): + setup_logging(mock_module) + + mock_module.fail_json.assert_called_once() + call_kwargs = mock_module.fail_json.call_args.kwargs + assert "error reading logging config" in call_kwargs["msg"] + + +def test_setup_logging_00030(monkeypatch) -> None: + """ + # Summary + + Verify `setup_logging()` returns a `Log` instance with logging disabled when `ND_LOGGING_CONFIG` is not set. + + ## Test + + - `ND_LOGGING_CONFIG` is not set. + - `setup_logging()` returns a `Log` instance. + - `module.fail_json()` is not called. 
+ + ## Classes and Methods + + - `setup_logging()` + """ + monkeypatch.delenv("ND_LOGGING_CONFIG", raising=False) + + mock_module = MagicMock() + + with does_not_raise(): + result = setup_logging(mock_module) + + assert isinstance(result, Log) + mock_module.fail_json.assert_not_called() From b00aebea1eeb99f59aae0c8011e65d0df95f200e Mon Sep 17 00:00:00 2001 From: AKDRG Date: Wed, 11 Mar 2026 21:35:19 +0530 Subject: [PATCH 002/109] Initial Commit : ND Manage Switches ( Smart Endpoints + Pydantic Models + Module ) --- .../nd_manage_switches/manage_credentials.py | 153 + .../manage_fabric_bootstrap.py | 151 + .../manage_fabric_config.py | 303 ++ .../manage_fabric_discovery.py | 90 + .../manage_fabric_switch_actions.py | 754 +++++ .../manage_fabric_switches.py | 290 ++ plugins/module_utils/models/__init__.py | 1 + .../models/nd_manage_switches/__init__.py | 143 + .../nd_manage_switches/bootstrap_models.py | 388 +++ .../nd_manage_switches/config_models.py | 654 +++++ .../nd_manage_switches/discovery_models.py | 268 ++ .../models/nd_manage_switches/enums.py | 320 ++ .../nd_manage_switches/preprovision_models.py | 218 ++ .../models/nd_manage_switches/rma_models.py | 258 ++ .../switch_actions_models.py | 116 + .../nd_manage_switches/switch_data_models.py | 488 +++ .../models/nd_manage_switches/validators.py | 115 + plugins/module_utils/nd_switch_resources.py | 2611 +++++++++++++++++ .../utils/nd_manage_switches/__init__.py | 52 + .../nd_manage_switches/bootstrap_utils.py | 111 + .../utils/nd_manage_switches/exceptions.py | 20 + .../utils/nd_manage_switches/fabric_utils.py | 177 ++ .../utils/nd_manage_switches/payload_utils.py | 90 + .../nd_manage_switches/switch_helpers.py | 138 + .../nd_manage_switches/switch_wait_utils.py | 593 ++++ plugins/modules/nd_manage_switches.py | 622 ++++ 26 files changed, 9124 insertions(+) create mode 100644 plugins/module_utils/endpoints/v1/nd_manage_switches/manage_credentials.py create mode 100644 
plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_bootstrap.py create mode 100644 plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_config.py create mode 100644 plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_discovery.py create mode 100644 plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switch_actions.py create mode 100644 plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switches.py create mode 100644 plugins/module_utils/models/__init__.py create mode 100644 plugins/module_utils/models/nd_manage_switches/__init__.py create mode 100644 plugins/module_utils/models/nd_manage_switches/bootstrap_models.py create mode 100644 plugins/module_utils/models/nd_manage_switches/config_models.py create mode 100644 plugins/module_utils/models/nd_manage_switches/discovery_models.py create mode 100644 plugins/module_utils/models/nd_manage_switches/enums.py create mode 100644 plugins/module_utils/models/nd_manage_switches/preprovision_models.py create mode 100644 plugins/module_utils/models/nd_manage_switches/rma_models.py create mode 100644 plugins/module_utils/models/nd_manage_switches/switch_actions_models.py create mode 100644 plugins/module_utils/models/nd_manage_switches/switch_data_models.py create mode 100644 plugins/module_utils/models/nd_manage_switches/validators.py create mode 100644 plugins/module_utils/nd_switch_resources.py create mode 100644 plugins/module_utils/utils/nd_manage_switches/__init__.py create mode 100644 plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py create mode 100644 plugins/module_utils/utils/nd_manage_switches/exceptions.py create mode 100644 plugins/module_utils/utils/nd_manage_switches/fabric_utils.py create mode 100644 plugins/module_utils/utils/nd_manage_switches/payload_utils.py create mode 100644 plugins/module_utils/utils/nd_manage_switches/switch_helpers.py create mode 100644 
plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py create mode 100644 plugins/modules/nd_manage_switches.py diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_credentials.py b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_credentials.py new file mode 100644 index 00000000..9007be8d --- /dev/null +++ b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_credentials.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Credentials endpoint models. + +This module contains endpoint definitions for switch credential operations +in the ND Manage API. + +Endpoints covered: +- List switch credentials +- Create switch credentials +- Remove switch credentials +- Validate switch credentials +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat Chengam Saravanan" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +class CredentialsSwitchesEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for credentials switches endpoint. 
+ + ## Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + params = CredentialsSwitchesEndpointParams(ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "ticketId=CHG12345" + ``` + """ + + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + + +class _V1ManageCredentialsSwitchesBase(BaseModel): + """ + Base class for Credentials Switches endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/credentials/switches endpoint. + """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + return BasePath.nd_manage("credentials", "switches") + + +class V1ManageCredentialsSwitchesPost(_V1ManageCredentialsSwitchesBase): + """ + # Summary + + Create Switch Credentials Endpoint + + ## Description + + Endpoint to save switch credentials for the user. + + ## Path + + - /api/v1/manage/credentials/switches + - /api/v1/manage/credentials/switches?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Create credentials without ticket + request = V1ManageCredentialsSwitchesPost() + path = request.path + verb = request.verb + + # Create credentials with change control ticket + request = V1ManageCredentialsSwitchesPost() + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/credentials/switches?ticketId=CHG12345 + ``` + """ + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageCredentialsSwitchesPost"] = Field( + default="V1ManageCredentialsSwitchesPost", description="Class name for backward 
compatibility" + ) + endpoint_params: CredentialsSwitchesEndpointParams = Field( + default_factory=CredentialsSwitchesEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_bootstrap.py b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_bootstrap.py new file mode 100644 index 00000000..48212482 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_bootstrap.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Bootstrap endpoint models. + +This module contains endpoint definitions for switch bootstrap operations +within fabrics in the ND Manage API. 
+ +Endpoints covered: +- List bootstrap switches (POAP/PnP) +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat Chengam Saravanan" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +class FabricBootstrapEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for fabric bootstrap endpoint. + + ## Parameters + + - max: Maximum number of results to return (optional) + - offset: Pagination offset (optional) + - filter: Lucene filter expression (optional) + + ## Usage + + ```python + params = FabricBootstrapEndpointParams(max=50, offset=0) + query_string = params.to_query_string() + # Returns: "max=50&offset=0" + ``` + """ + + max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") + offset: Optional[int] = Field(default=None, ge=0, description="Pagination offset") + filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") + + +class V1ManageFabricBootstrapGet(FabricNameMixin, BaseModel): + """ + # Summary + + List Bootstrap Switches Endpoint + + ## Description + + Endpoint to list switches currently going through bootstrap loop via POAP (NX-OS) or PnP (IOS-XE). 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/bootstrap + - /api/v1/manage/fabrics/{fabricName}/bootstrap?max=50&offset=0 + + ## Verb + + - GET + + ## Query Parameters + + - max: Maximum number of results (optional) + - offset: Pagination offset (optional) + - filter: Lucene filter expression (optional) + + ## Usage + + ```python + # List all bootstrap switches + request = V1ManageFabricBootstrapGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # List with pagination + request = V1ManageFabricBootstrapGet() + request.fabric_name = "MyFabric" + request.endpoint_params.max = 50 + request.endpoint_params.offset = 0 + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/bootstrap?max=50&offset=0 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricBootstrapGet"] = Field( + default="V1ManageFabricBootstrapGet", description="Class name for backward compatibility" + ) + endpoint_params: FabricBootstrapEndpointParams = Field( + default_factory=FabricBootstrapEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base_path = BasePath.nd_manage("fabrics", self.fabric_name, "bootstrap") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_config.py b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_config.py new file mode 100644 index 00000000..bb037e1e --- /dev/null +++ b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_config.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Config endpoint models. + +This module contains endpoint definitions for fabric configuration operations +in the ND Manage API. 
+ +Endpoints covered: +- Config save (recalculate) +- Config deploy +- Get fabric info +- Inventory discover status +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat Chengam Saravanan" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +class FabricConfigDeployEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for fabric config deploy endpoint. + + ## Parameters + + - force_show_run: Force show running config before deploy (optional) + - incl_all_msd_switches: Include all MSD fabric switches (optional) + + ## Usage + + ```python + params = FabricConfigDeployEndpointParams(force_show_run=True) + query_string = params.to_query_string() + # Returns: "forceShowRun=true" + ``` + """ + + force_show_run: Optional[bool] = Field(default=None, description="Force show running config before deploy") + incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") + + +class V1ManageFabricConfigSavePost(FabricNameMixin, BaseModel): + """ + # Summary + + Fabric Config Save Endpoint + + ## Description + + Endpoint to save (recalculate) fabric configuration. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/actions/configSave + + ## Verb + + - POST + + ## Usage + + ```python + request = V1ManageFabricConfigSavePost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricConfigSavePost"] = Field( + default="V1ManageFabricConfigSavePost", description="Class name for backward compatibility" + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.nd_manage("fabrics", self.fabric_name, "actions", "configSave") + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class V1ManageFabricConfigDeployPost(FabricNameMixin, BaseModel): + """ + # Summary + + Fabric Config Deploy Endpoint + + ## Description + + Endpoint to deploy pending configuration to switches in a fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/actions/configDeploy + - /api/v1/manage/fabrics/{fabricName}/actions/configDeploy?forceShowRun=true + + ## Verb + + - POST + + ## Query Parameters + + - force_show_run: Force show running config before deploy (optional) + - incl_all_msd_switches: Include all MSD fabric switches (optional) + + ## Usage + + ```python + # Deploy with defaults + request = V1ManageFabricConfigDeployPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Deploy forcing show run + request = V1ManageFabricConfigDeployPost() + request.fabric_name = "MyFabric" + request.endpoint_params.force_show_run = True + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricConfigDeployPost"] = Field( + default="V1ManageFabricConfigDeployPost", description="Class name for backward compatibility" + ) + endpoint_params: FabricConfigDeployEndpointParams = Field( + default_factory=FabricConfigDeployEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base_path = BasePath.nd_manage("fabrics", self.fabric_name, "actions", "configDeploy") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class V1ManageFabricGet(FabricNameMixin, BaseModel): + """ + # Summary + + Get Fabric Info Endpoint + + ## Description + + Endpoint to retrieve fabric information. + + ## Path + + - /api/v1/manage/fabrics/{fabricName} + + ## Verb + + - GET + + ## Usage + + ```python + request = V1ManageFabricGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricGet"] = Field( + default="V1ManageFabricGet", description="Class name for backward compatibility" + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.nd_manage("fabrics", self.fabric_name) + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET + + +class V1ManageFabricInventoryDiscoverGet(FabricNameMixin, BaseModel): + """ + # Summary + + Fabric Inventory Discover Endpoint + + ## Description + + Endpoint to get discovery status for switches in a fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/inventory/discover + + ## Verb + + - GET + + ## Usage + + ```python + request = V1ManageFabricInventoryDiscoverGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricInventoryDiscoverGet"] = Field( + default="V1ManageFabricInventoryDiscoverGet", description="Class name for backward compatibility" + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.nd_manage("fabrics", self.fabric_name, "inventory", "discover") + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_discovery.py b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_discovery.py new file mode 100644 index 00000000..d7d2e1f2 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_discovery.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Discovery endpoint models. + +This module contains endpoint definitions for switch discovery operations +within fabrics in the ND Manage API. 
+ +Endpoints covered: +- Shallow discovery +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat Chengam Saravanan" +# pylint: enable=invalid-name + +from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +class V1ManageFabricShallowDiscoveryPost(FabricNameMixin, BaseModel): + """ + # Summary + + Shallow Discovery Endpoint + + ## Description + + Endpoint to shallow discover switches given seed switches with hop count. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/actions/shallowDiscovery + + ## Verb + + - POST + + ## Usage + + ```python + request = V1ManageFabricShallowDiscoveryPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricShallowDiscoveryPost"] = Field( + default="V1ManageFabricShallowDiscoveryPost", description="Class name for backward compatibility" + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.nd_manage("fabrics", self.fabric_name, "actions", "shallowDiscovery") + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switch_actions.py b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switch_actions.py new file mode 100644 index 00000000..73aa93ea --- /dev/null +++ b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switch_actions.py @@ -0,0 +1,754 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Switch Actions endpoint models. + +This module contains endpoint definitions for switch action operations +within fabrics in the ND Manage API. 
+ +Endpoints covered: +- Remove switches (bulk delete) +- Change switch roles (bulk) +- Import bootstrap (POAP) +- Pre-provision switches +- Provision RMA +- Change switch serial number +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat Chengam Saravanan" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, + SwitchSerialNumberMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +# ============================================================================ +# Endpoint-specific query parameter classes +# ============================================================================ + + +class SwitchActionsRemoveEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch actions remove endpoint. 
+ + ## Parameters + + - force: Force removal even if switches have pending operations (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + params = SwitchActionsRemoveEndpointParams(force=True, ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "force=true&ticketId=CHG12345" + ``` + """ + + force: Optional[bool] = Field(default=None, description="Force removal of switches") + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + + +class SwitchActionsTicketEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch action endpoints that accept a ticket ID. + + ## Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + params = SwitchActionsTicketEndpointParams(ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "ticketId=CHG12345" + ``` + """ + + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + + +class SwitchActionsImportEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch import/provision endpoints. 
+ + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + params = SwitchActionsImportEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1&ticketId=CHG12345" + ``` + """ + + cluster_name: Optional[str] = Field(default=None, min_length=1, description="Target cluster name") + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + + +# ============================================================================ +# Switch Actions Endpoints +# ============================================================================ + + +class V1ManageFabricSwitchActionsRemovePost(FabricNameMixin, BaseModel): + """ + # Summary + + Remove Switches Endpoint (Bulk Delete) + + ## Description + + Endpoint to delete multiple switches from a fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/remove + - /api/v1/manage/fabrics/{fabricName}/switchActions/remove?force=true&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - force: Force removal even if switches have pending operations (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Remove switches + request = V1ManageFabricSwitchActionsRemovePost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Remove switches with force and ticket + request = V1ManageFabricSwitchActionsRemovePost() + request.fabric_name = "MyFabric" + request.endpoint_params.force = True + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/remove?force=true&ticketId=CHG12345 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", 
description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchActionsRemovePost"] = Field( + default="V1ManageFabricSwitchActionsRemovePost", description="Class name for backward compatibility" + ) + endpoint_params: SwitchActionsRemoveEndpointParams = Field( + default_factory=SwitchActionsRemoveEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "remove") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class V1ManageFabricSwitchActionsChangeRolesPost(FabricNameMixin, BaseModel): + """ + # Summary + + Change Switch Roles Endpoint (Bulk) + + ## Description + + Endpoint to change the role of multiple switches in a single request. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/changeRoles + - /api/v1/manage/fabrics/{fabricName}/switchActions/changeRoles?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Change roles + request = V1ManageFabricSwitchActionsChangeRolesPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Change roles with change control ticket + request = V1ManageFabricSwitchActionsChangeRolesPost() + request.fabric_name = "MyFabric" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchActionsChangeRolesPost"] = Field( + default="V1ManageFabricSwitchActionsChangeRolesPost", + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "changeRoles") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class V1ManageFabricSwitchActionsImportBootstrapPost(FabricNameMixin, BaseModel): + """ + # Summary + + Import Bootstrap Switches Endpoint + + ## Description + + Endpoint to import and bootstrap preprovision or bootstrap switches to a fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/importBootstrap + - /api/v1/manage/fabrics/{fabricName}/switchActions/importBootstrap?clusterName=cluster1&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Import bootstrap switches + request = V1ManageFabricSwitchActionsImportBootstrapPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Import with cluster and ticket + request = V1ManageFabricSwitchActionsImportBootstrapPost() + request.fabric_name = "MyFabric" + request.endpoint_params.cluster_name = "cluster1" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?clusterName=cluster1&ticketId=CHG12345 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", 
description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchActionsImportBootstrapPost"] = Field( + default="V1ManageFabricSwitchActionsImportBootstrapPost", description="Class name for backward compatibility" + ) + endpoint_params: SwitchActionsImportEndpointParams = Field( + default_factory=SwitchActionsImportEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "importBootstrap") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# Pre-Provision Endpoints +# ============================================================================ + + +class V1ManageFabricSwitchActionsPreProvisionPost(FabricNameMixin, BaseModel): + """ + # Summary + + Pre-Provision Switches Endpoint + + ## Description + + Endpoint to pre-provision switches in a fabric. Pre-provisioning allows + you to define switch parameters (serial, IP, model, etc.) ahead of time + so that when the physical device boots it is automatically absorbed into + the fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/preProvision + - /api/v1/manage/fabrics/{fabricName}/switchActions/preProvision?clusterName=cluster1&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Pre-provision switches + request = V1ManageFabricSwitchActionsPreProvisionPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Pre-provision with cluster and ticket + request = V1ManageFabricSwitchActionsPreProvisionPost() + request.fabric_name = "MyFabric" + request.endpoint_params.cluster_name = "cluster1" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/preProvision?clusterName=cluster1&ticketId=CHG12345 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchActionsPreProvisionPost"] = Field( + default="V1ManageFabricSwitchActionsPreProvisionPost", + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsImportEndpointParams = Field( + default_factory=SwitchActionsImportEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "preProvision") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# RMA (Return Material Authorization) Endpoints +# ============================================================================ + + +class V1ManageFabricSwitchProvisionRMAPost(FabricNameMixin, SwitchSerialNumberMixin, BaseModel): + """ + # Summary + + Provision RMA for Switch Endpoint + + ## Description + + Endpoint to RMA (Return Material Authorization) an existing switch with a new bootstrapped switch. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Provision RMA + request = V1ManageFabricSwitchProvisionRMAPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + path = request.path + verb = request.verb + + # Provision RMA with change control ticket + request = V1ManageFabricSwitchProvisionRMAPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchProvisionRMAPost"] = Field( + default="V1ManageFabricSwitchProvisionRMAPost", description="Class name for backward compatibility" + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + if self.switch_sn is None: + raise ValueError("switch_sn must be set before accessing path") + base_path = BasePath.nd_manage( + "fabrics", self.fabric_name, "switches", self.switch_sn, "actions", "provisionRMA" + ) + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# Change Switch Serial Number Endpoints +# ============================================================================ + + +class SwitchActionsClusterEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch action endpoints that accept only a cluster name. + + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + + ## Usage + + ```python + params = SwitchActionsClusterEndpointParams(cluster_name="cluster1") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1" + ``` + """ + + cluster_name: Optional[str] = Field(default=None, min_length=1, description="Target cluster name") + + +class V1ManageFabricSwitchChangeSerialNumberPost(FabricNameMixin, SwitchSerialNumberMixin, BaseModel): + """ + # Summary + + Change Switch Serial Number Endpoint + + ## Description + + Endpoint to change the serial number for a pre-provisioned switch. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber?clusterName=cluster1 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + + ## Usage + + ```python + # Change serial number + request = V1ManageFabricSwitchChangeSerialNumberPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + path = request.path + verb = request.verb + + # Change serial number with cluster name + request = V1ManageFabricSwitchChangeSerialNumberPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + request.endpoint_params.cluster_name = "cluster1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchChangeSerialNumberPost"] = Field( + default="V1ManageFabricSwitchChangeSerialNumberPost", description="Class name for backward compatibility" + ) + endpoint_params: SwitchActionsClusterEndpointParams = Field( + default_factory=SwitchActionsClusterEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + if self.switch_sn is None: + raise ValueError("switch_sn must be set before accessing path") + base_path = BasePath.nd_manage( + "fabrics", self.fabric_name, "switches", self.switch_sn, "actions", "changeSwitchSerialNumber" + ) + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# Rediscover Endpoints +# ============================================================================ + + +class V1ManageFabricSwitchActionsRediscoverPost(FabricNameMixin, BaseModel): + """ + # Summary + + Rediscover Switches Endpoint + + ## Description + + Endpoint to trigger rediscovery for one or more switches in a fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/rediscover + - /api/v1/manage/fabrics/{fabricName}/switchActions/rediscover?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Rediscover switches + request = V1ManageFabricSwitchActionsRediscoverPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Rediscover switches with change control ticket + request = V1ManageFabricSwitchActionsRediscoverPost() + request.fabric_name = "MyFabric" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345 + ``` + """ + + model_config = COMMON_CONFIG + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchActionsRediscoverPost"] = Field( + default="V1ManageFabricSwitchActionsRediscoverPost", + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "rediscover") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switches.py b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switches.py new file mode 100644 index 00000000..b771fb1d --- /dev/null +++ b/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switches.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Switches endpoint models. + +This module contains endpoint definitions for switch CRUD operations +within fabrics in the ND Manage API. 
+ +Endpoints covered: +- List switches in a fabric +- Add switches to a fabric +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat Chengam Saravanan" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, + SwitchSerialNumberMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +class FabricSwitchesGetEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for list fabric switches endpoint. 
+ + ## Parameters + + - hostname: Filter by switch hostname (optional) + - max: Maximum number of results (optional) + - offset: Pagination offset (optional) + - filter: Lucene filter expression (optional) + + ## Usage + + ```python + params = FabricSwitchesGetEndpointParams(hostname="leaf1", max=100) + query_string = params.to_query_string() + # Returns: "hostname=leaf1&max=100" + ``` + """ + + hostname: Optional[str] = Field(default=None, min_length=1, description="Filter by switch hostname") + max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") + offset: Optional[int] = Field(default=None, ge=0, description="Pagination offset") + filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") + + +class FabricSwitchesAddEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for add switches to fabric endpoint. + + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1&ticketId=CHG12345" + ``` + """ + + cluster_name: Optional[str] = Field(default=None, min_length=1, description="Target cluster name") + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + + +class _V1ManageFabricSwitchesBase(FabricNameMixin, BaseModel): + """ + Base class for Fabric Switches endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switches endpoint. 
+ """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.nd_manage("fabrics", self.fabric_name, "switches") + + +class V1ManageFabricSwitchesGet(_V1ManageFabricSwitchesBase): + """ + # Summary + + List Fabric Switches Endpoint + + ## Description + + Endpoint to list all switches in a specific fabric with optional filtering. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches + - /api/v1/manage/fabrics/{fabricName}/switches?hostname=leaf1&max=100 + + ## Verb + + - GET + + ## Query Parameters + + - hostname: Filter by switch hostname (optional) + - max: Maximum number of results (optional) + - offset: Pagination offset (optional) + - filter: Lucene filter expression (optional) + + ## Usage + + ```python + # List all switches + request = V1ManageFabricSwitchesGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # List with filtering + request = V1ManageFabricSwitchesGet() + request.fabric_name = "MyFabric" + request.endpoint_params.hostname = "leaf1" + request.endpoint_params.max = 100 + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches?hostname=leaf1&max=100 + ``` + """ + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchesGet"] = Field( + default="V1ManageFabricSwitchesGet", description="Class name for backward compatibility" + ) + endpoint_params: FabricSwitchesGetEndpointParams = Field( + default_factory=FabricSwitchesGetEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the 
endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET + + +class V1ManageFabricSwitchesPost(_V1ManageFabricSwitchesBase): + """ + # Summary + + Add Switches to Fabric Endpoint + + ## Description + + Endpoint to add switches to a specific fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches + - /api/v1/manage/fabrics/{fabricName}/switches?clusterName=cluster1&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Add switches + request = V1ManageFabricSwitchesPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Add switches with cluster and ticket + request = V1ManageFabricSwitchesPost() + request.fabric_name = "MyFabric" + request.endpoint_params.cluster_name = "cluster1" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches?clusterName=cluster1&ticketId=CHG12345 + ``` + """ + + # Version metadata + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + class_name: Literal["V1ManageFabricSwitchesPost"] = Field( + default="V1ManageFabricSwitchesPost", description="Class name for backward compatibility" + ) + endpoint_params: FabricSwitchesAddEndpointParams = Field( + default_factory=FabricSwitchesAddEndpointParams, 
description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class _V1ManageFabricSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, BaseModel): + """ + Base class for single switch endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switches/{switchSn} endpoint. + """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + if self.switch_sn is None: + raise ValueError("switch_sn must be set before accessing path") + return BasePath.nd_manage("fabrics", self.fabric_name, "switches", self.switch_sn) diff --git a/plugins/module_utils/models/__init__.py b/plugins/module_utils/models/__init__.py new file mode 100644 index 00000000..40a96afc --- /dev/null +++ b/plugins/module_utils/models/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/plugins/module_utils/models/nd_manage_switches/__init__.py b/plugins/module_utils/models/nd_manage_switches/__init__.py new file mode 100644 index 00000000..6ddd6cd8 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/__init__.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""nd_manage_switches models package. 
+ +Re-exports all model classes, enums, and validators from their individual +modules so that consumers can import directly from the package: + + from .models.nd_manage_switches import SwitchConfigModel, SwitchRole, ... +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +# --- Enums --- +from .enums import ( # noqa: F401 + AdvisoryLevel, + AnomalyLevel, + ConfigSyncStatus, + DiscoveryStatus, + PlatformType, + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, + SystemMode, + VpcRole, +) + +# --- Validators --- +from .validators import SwitchValidators # noqa: F401 + +# --- Nested / shared models --- +from .switch_data_models import ( # noqa: F401 + AdditionalAciSwitchData, + AdditionalSwitchData, + Metadata, + SwitchMetadata, + TelemetryIpCollection, + VpcData, +) + +# --- Discovery models --- +from .discovery_models import ( # noqa: F401 + AddSwitchesRequestModel, + ShallowDiscoveryRequestModel, + SwitchDiscoveryModel, +) + +# --- Switch data models --- +from .switch_data_models import ( # noqa: F401 + SwitchDataModel, +) + +# --- Bootstrap models --- +from .bootstrap_models import ( # noqa: F401 + BootstrapBaseData, + BootstrapBaseModel, + BootstrapCredentialModel, + BootstrapImportSpecificModel, + BootstrapImportSwitchModel, + ImportBootstrapSwitchesRequestModel, +) + +# --- Preprovision models --- +from .preprovision_models import ( # noqa: F401 + PreProvisionSwitchesRequestModel, + PreProvisionSwitchModel, +) + +# --- RMA models --- +from .rma_models import ( # noqa: F401 + RMASpecificModel, + RMASwitchModel, +) + +# --- Switch actions models --- +from .switch_actions_models import ( # noqa: F401 + ChangeSwitchSerialNumberRequestModel, + SwitchCredentialsRequestModel, +) + +# --- Config / playbook models --- +from .config_models import ( # noqa: F401 + ConfigDataModel, + POAPConfigModel, + RMAConfigModel, + SwitchConfigModel, +) + + +__all__ = [ + # Enums + "AdvisoryLevel", + "AnomalyLevel", + 
"ConfigSyncStatus", + "DiscoveryStatus", + "PlatformType", + "RemoteCredentialStore", + "SnmpV3AuthProtocol", + "SwitchRole", + "SystemMode", + "VpcRole", + # Validators + "SwitchValidators", + # Nested models + "AdditionalAciSwitchData", + "AdditionalSwitchData", + "Metadata", + "SwitchMetadata", + "TelemetryIpCollection", + "VpcData", + # Discovery models + "AddSwitchesRequestModel", + "ShallowDiscoveryRequestModel", + "SwitchDiscoveryModel", + # Switch data models + "SwitchDataModel", + # Bootstrap models + "BootstrapBaseData", + "BootstrapBaseModel", + "BootstrapCredentialModel", + "BootstrapImportSpecificModel", + "BootstrapImportSwitchModel", + "ImportBootstrapSwitchesRequestModel", + # Preprovision models + "PreProvisionSwitchesRequestModel", + "PreProvisionSwitchModel", + # RMA models + "RMASpecificModel", + "RMASwitchModel", + # Switch actions models + "ChangeSwitchSerialNumberRequestModel", + "SwitchCredentialsRequestModel", + # Config models + "ConfigDataModel", + "POAPConfigModel", + "RMAConfigModel", + "SwitchConfigModel", +] diff --git a/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py b/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py new file mode 100644 index 00000000..864a8e25 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py @@ -0,0 +1,388 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Bootstrap (POAP) switch models for import operations. + +Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. 
class BootstrapBaseData(NDNestedModel):
    """
    Device-reported data embedded in a bootstrap API entry.
    """
    # No identifying fields: this nested block is diffed as a whole.
    identifiers: ClassVar[List[str]] = []
    # Optional because the device may not have reported a gateway yet.
    gateway_ip_mask: Optional[str] = Field(
        default=None,
        alias="gatewayIpMask",
        description="Gateway IP address with mask"
    )
    models: Optional[List[str]] = Field(
        default=None,
        description="Supported models for switch"
    )

    @field_validator('gateway_ip_mask', mode='before')
    @classmethod
    def validate_gateway(cls, v: Optional[str]) -> Optional[str]:
        # Delegates CIDR validation to the shared helper; presumably
        # None/empty input passes through as None — confirm against
        # SwitchValidators.validate_cidr.
        return SwitchValidators.validate_cidr(v)
class BootstrapCredentialModel(NDBaseModel):
    """
    Credential properties for a switch bootstrap or pre-provision operation.

    When useNewCredentials is true, separate discovery credentials are used for
    post-bootstrap switch discovery instead of the admin password.
    """
    identifiers: ClassVar[List[str]] = []
    identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton"
    exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"]
    password: str = Field(
        ...,
        description="Switch password to be set during bootstrap for admin user"
    )
    discovery_auth_protocol: SnmpV3AuthProtocol = Field(
        ...,
        alias="discoveryAuthProtocol"
    )
    use_new_credentials: bool = Field(
        default=False,
        alias="useNewCredentials",
        description="If True, use discoveryUsername and discoveryPassword"
    )
    discovery_username: Optional[str] = Field(
        default=None,
        alias="discoveryUsername",
        description="Username to be used for switch discovery post bootstrap"
    )
    discovery_password: Optional[str] = Field(
        default=None,
        alias="discoveryPassword",
        description="Password associated with the corresponding switch discovery user"
    )
    remote_credential_store: RemoteCredentialStore = Field(
        default=RemoteCredentialStore.LOCAL,
        alias="remoteCredentialStore",
        description="Type of credential store for discovery credentials"
    )
    remote_credential_store_key: Optional[str] = Field(
        default=None,
        alias="remoteCredentialStoreKey",
        description="Remote credential store key for discovery credentials"
    )

    @model_validator(mode='after')
    def validate_credentials(self) -> Self:
        """Validate credential configuration logic.

        Only enforced when use_new_credentials is set: a cyberark store
        needs a store key, a local store needs an explicit username and
        password pair.
        """
        if not self.use_new_credentials:
            return self
        store = self.remote_credential_store
        if store == RemoteCredentialStore.CYBERARK and not self.remote_credential_store_key:
            raise ValueError(
                "remote_credential_store_key is required when "
                "remote_credential_store is 'cyberark'"
            )
        if store == RemoteCredentialStore.LOCAL and (
            not self.discovery_username or not self.discovery_password
        ):
            raise ValueError(
                "discovery_username and discovery_password are required when "
                "remote_credential_store is 'local' and use_new_credentials is True"
            )
        return self


class BootstrapImportSpecificModel(NDBaseModel):
    """
    Switch-identifying fields returned by the bootstrap GET API prior to import.
    """
    identifiers: ClassVar[List[str]] = []
    identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton"
    hostname: str = Field(
        ...,
        description="Hostname of the bootstrap switch"
    )
    ip: str = Field(
        ...,
        description="IP address of the bootstrap switch"
    )
    serial_number: str = Field(
        ...,
        alias="serialNumber",
        description="Serial number of the bootstrap switch"
    )
    in_inventory: bool = Field(
        ...,
        alias="inInventory",
        description="True if the bootstrap switch is in inventory"
    )
    public_key: str = Field(
        ...,
        alias="publicKey",
        description="Public Key"
    )
    finger_print: str = Field(
        ...,
        alias="fingerPrint",
        description="Fingerprint"
    )
    dhcp_bootstrap_ip: Optional[str] = Field(
        default=None,
        alias="dhcpBootstrapIp",
        description="This is used for device day-0 bring-up when using inband reachability"
    )
    seed_switch: bool = Field(
        default=False,
        alias="seedSwitch",
        description="Use as seed switch"
    )

    @field_validator('hostname', mode='before')
    @classmethod
    def validate_host(cls, v: str) -> str:
        result = SwitchValidators.validate_hostname(v)
        if result is None:
            raise ValueError("hostname cannot be empty")
        return result

    @field_validator('ip', 'dhcp_bootstrap_ip', mode='before')
    @classmethod
    def validate_ip(cls, v: Optional[str]) -> Optional[str]:
        # dhcp_bootstrap_ip is optional, so None is passed through.
        if v is None:
            return None
        return SwitchValidators.validate_ip_address(v)

    @field_validator('serial_number', mode='before')
    @classmethod
    def validate_serial(cls, v: str) -> str:
        result = SwitchValidators.validate_serial_number(v)
        if result is None:
            raise ValueError("serial_number cannot be empty")
        return result
class BootstrapImportSwitchModel(NDBaseModel):
    """
    Request payload for importing a single POAP bootstrap switch into the fabric.

    Path: POST /fabrics/{fabricName}/switchActions/importBootstrap
    """
    # Matched by serial number when diffing against controller state.
    identifiers: ClassVar[List[str]] = ["serial_number"]
    identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single"
    # Secrets are never part of idempotency comparison.
    exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"]

    serial_number: str = Field(
        ...,
        alias="serialNumber",
        description="Serial number of the bootstrap switch"
    )
    model: str = Field(
        ...,
        description="Model of the bootstrap switch"
    )
    version: str = Field(
        ...,
        description="Software version of the bootstrap switch"
    )
    hostname: str = Field(
        ...,
        description="Hostname of the bootstrap switch"
    )
    ip_address: str = Field(
        ...,
        alias="ipAddress",
        description="IP address of the bootstrap switch"
    )
    password: str = Field(
        ...,
        description="Switch password to be set during bootstrap for admin user"
    )
    discovery_auth_protocol: SnmpV3AuthProtocol = Field(
        ...,
        alias="discoveryAuthProtocol"
    )
    discovery_username: Optional[str] = Field(
        default=None,
        alias="discoveryUsername"
    )
    discovery_password: Optional[str] = Field(
        default=None,
        alias="discoveryPassword"
    )
    data: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Bootstrap configuration data block (gatewayIpMask, models)"
    )
    fingerprint: str = Field(
        default="",
        description="SSH fingerprint from bootstrap GET API"
    )
    public_key: str = Field(
        default="",
        alias="publicKey",
        description="SSH public key from bootstrap GET API"
    )
    re_add: bool = Field(
        default=False,
        alias="reAdd",
        description="Re-add flag from bootstrap GET API"
    )
    in_inventory: bool = Field(
        default=False,
        alias="inInventory"
    )
    image_policy: Optional[str] = Field(
        default=None,
        alias="imagePolicy",
        description="Image policy associated with the switch during bootstrap"
    )
    switch_role: Optional[SwitchRole] = Field(
        default=None,
        alias="switchRole"
    )
    # NOTE(review): ip / software_version intentionally duplicate
    # ipAddress / version per their descriptions — presumably the
    # importBootstrap API expects both spellings; confirm against the
    # OpenAPI spec before consolidating.
    ip: Optional[str] = Field(
        default=None,
        description="IP address (duplicate of ipAddress for API compatibility)"
    )
    software_version: Optional[str] = Field(
        default=None,
        alias="softwareVersion",
        description="Software version (duplicate of version for API compatibility)"
    )
    gateway_ip_mask: Optional[str] = Field(
        default=None,
        alias="gatewayIpMask",
        description="Gateway IP address with mask"
    )

    @field_validator('ip_address', mode='before')
    @classmethod
    def validate_ip_address(cls, v: str) -> str:
        result = SwitchValidators.validate_ip_address(v)
        if result is None:
            raise ValueError(f"Invalid IP address: {v}")
        return result

    @field_validator('hostname', mode='before')
    @classmethod
    def validate_host(cls, v: str) -> str:
        result = SwitchValidators.validate_hostname(v)
        if result is None:
            raise ValueError("hostname cannot be empty")
        return result

    @field_validator('serial_number', mode='before')
    @classmethod
    def validate_serial(cls, v: str) -> str:
        result = SwitchValidators.validate_serial_number(v)
        if result is None:
            raise ValueError("serial_number cannot be empty")
        return result

    @computed_field(alias="useNewCredentials")
    @property
    def use_new_credentials(self) -> bool:
        """Derive useNewCredentials from discoveryUsername and discoveryPassword."""
        # True only when BOTH discovery credentials are non-empty.
        return bool(self.discovery_username and self.discovery_password)

    def to_payload(self) -> Dict[str, Any]:
        """Convert to API payload format matching importBootstrap spec."""
        # Aliased keys, None values dropped; the computed field above is
        # emitted as "useNewCredentials" by pydantic's model_dump.
        return self.model_dump(by_alias=True, exclude_none=True)

    @classmethod
    def from_response(cls, response: Dict[str, Any]) -> Self:
        """Create model instance from API response."""
        return cls.model_validate(response)
class ImportBootstrapSwitchesRequestModel(NDBaseModel):
    """
    Request body wrapping a list of bootstrap switch payloads for bulk POAP import.

    Path: POST /fabrics/{fabricName}/switchActions/importBootstrap
    """
    identifiers: ClassVar[List[str]] = []
    identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton"
    switches: List[BootstrapImportSwitchModel] = Field(
        ...,
        description="PowerOn Auto Provisioning switches"
    )

    def to_payload(self) -> Dict[str, Any]:
        """Convert to API payload format."""
        # Delegate per-switch serialization, then wrap under the
        # top-level "switches" key the API expects.
        switch_payloads = [switch.to_payload() for switch in self.switches]
        return {"switches": switch_payloads}


__all__ = [
    "BootstrapBaseData",
    "BootstrapBaseModel",
    "BootstrapCredentialModel",
    "BootstrapImportSpecificModel",
    "BootstrapImportSwitchModel",
    "ImportBootstrapSwitchesRequestModel",
]
class ConfigDataModel(NDNestedModel):
    """
    Hardware and gateway network data required for POAP and RMA operations.

    Maps to config.poap.config_data and config.rma.config_data in the playbook.
    """
    identifiers: ClassVar[List[str]] = []

    models: List[str] = Field(
        ...,
        alias="models",
        min_length=1,
        description="List of model of modules in switch to Bootstrap/Pre-provision/RMA"
    )
    gateway: str = Field(
        ...,
        description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)"
    )

    @field_validator('gateway', mode='before')
    @classmethod
    def validate_gateway(cls, v: str) -> str:
        """Validate gateway is a valid CIDR."""
        # Normalize once; empty / whitespace-only input is rejected up front.
        candidate = (v or "").strip()
        if not candidate:
            raise ValueError("gateway cannot be empty")
        try:
            ip_interface(candidate)
        except ValueError as e:
            raise ValueError(f"Invalid gateway IP address with mask: {v}") from e
        return candidate
class POAPConfigModel(NDNestedModel):
    """
    POAP configuration entry for a single switch in the playbook config list.

    Supports Bootstrap (serial_number only), Pre-provision (preprovision_serial only),
    and Swap (both serial fields) operation modes.
    """
    identifiers: ClassVar[List[str]] = []

    # Discovery credentials
    discovery_username: Optional[str] = Field(
        default=None,
        alias="discoveryUsername",
        description="Username for device discovery during POAP"
    )
    discovery_password: Optional[str] = Field(
        default=None,
        alias="discoveryPassword",
        description="Password for device discovery during POAP"
    )

    # Bootstrap operation - requires actual switch serial number
    serial_number: Optional[str] = Field(
        default=None,
        alias="serialNumber",
        min_length=1,
        description="Serial number of switch to Bootstrap"
    )

    # Pre-provision operation - requires pre-provision serial number
    preprovision_serial: Optional[str] = Field(
        default=None,
        alias="preprovisionSerial",
        min_length=1,
        description="Serial number of switch to Pre-provision"
    )

    # Common fields for both operations
    model: Optional[str] = Field(
        default=None,
        description="Model of switch to Bootstrap/Pre-provision"
    )
    version: Optional[str] = Field(
        default=None,
        description="Software version of switch to Bootstrap/Pre-provision"
    )
    hostname: Optional[str] = Field(
        default=None,
        description="Hostname of switch to Bootstrap/Pre-provision"
    )
    image_policy: Optional[str] = Field(
        default=None,
        alias="imagePolicy",
        description="Name of the image policy to be applied on switch"
    )
    config_data: Optional[ConfigDataModel] = Field(
        default=None,
        alias="configData",
        description=(
            "Basic config data of switch to Bootstrap/Pre-provision. "
            "'models' (list of module models) and 'gateway' (IP with mask) are mandatory."
        ),
    )

    @model_validator(mode='after')
    def validate_operation_type(self) -> Self:
        """Validate serial_number / preprovision_serial combinations.

        Allowed combinations:
        - serial_number only → Bootstrap
        - preprovision_serial only → Pre-provision
        - both serial_number AND preprovision_serial → Swap (change serial
          number of an existing pre-provisioned switch)
        - neither → error
        """
        has_serial = bool(self.serial_number)
        has_preprov = bool(self.preprovision_serial)

        if not has_serial and not has_preprov:
            raise ValueError(
                "Either 'serial_number' (for Bootstrap / Swap) or 'preprovision_serial' "
                "(for Pre-provision / Swap) must be provided."
            )

        return self

    @model_validator(mode='after')
    def validate_required_fields_for_non_swap(self) -> Self:
        """Validate model/version/hostname/config_data are all provided for non-swap POAP.

        For Bootstrap (serial_number only) or Pre-provision (preprovision_serial only)
        all four descriptor fields are mandatory. This mirrors the
        dcnm_inventory.py check:
        if only one serial provided → model, version, hostname, config_data required.

        When both serials are present (swap mode), these fields are not
        required because the swap API only needs the new serial number.
        """
        has_serial = bool(self.serial_number)
        has_preprov = bool(self.preprovision_serial)

        # XOR: exactly one serial → non-swap case
        if has_serial != has_preprov:
            missing = []
            if not self.model:
                missing.append("model")
            if not self.version:
                missing.append("version")
            if not self.hostname:
                missing.append("hostname")
            if not self.config_data:
                missing.append("config_data")
            if missing:
                op = "Bootstrap" if has_serial else "Pre-provisioning"
                raise ValueError(
                    f"model, version, hostname and config_data are required for "
                    f"{op} a switch. Missing: {', '.join(missing)}"
                )
        return self

    @model_validator(mode='after')
    def validate_discovery_credentials_pair(self) -> Self:
        """Validate that discovery_username and discovery_password are both set or both absent.

        Mirrors the dcnm_inventory.py bidirectional check:
        - discovery_username set → discovery_password required
        - discovery_password set → discovery_username required
        """
        has_user = bool(self.discovery_username)
        has_pass = bool(self.discovery_password)
        if has_user and not has_pass:
            raise ValueError(
                "discovery_password must be set when discovery_username is specified"
            )
        if has_pass and not has_user:
            raise ValueError(
                "discovery_username must be set when discovery_password is specified"
            )
        return self

    @field_validator('serial_number', 'preprovision_serial', mode='before')
    @classmethod
    def validate_serial_numbers(cls, v: Optional[str]) -> Optional[str]:
        """Validate serial numbers are not empty strings."""
        # NOTE(review): unlike RMAConfigModel.validate_serial_numbers this
        # returns v un-stripped — confirm whether trailing whitespace in a
        # serial is intended to survive here.
        if v is not None and not v.strip():
            raise ValueError("Serial number cannot be empty")
        return v
class RMAConfigModel(NDNestedModel):
    """
    RMA configuration entry for replacing a single switch via bootstrap.

    The switch being replaced must be in maintenance mode and either shut down
    or disconnected from the network before initiating the RMA operation.
    """
    identifiers: ClassVar[List[str]] = []

    # Discovery credentials
    discovery_username: Optional[str] = Field(
        default=None,
        alias="discoveryUsername",
        description="Username for device discovery during POAP and RMA discovery"
    )
    discovery_password: Optional[str] = Field(
        default=None,
        alias="discoveryPassword",
        description="Password for device discovery during POAP and RMA discovery"
    )

    # Required fields for RMA
    serial_number: str = Field(
        ...,
        alias="serialNumber",
        min_length=1,
        description="Serial number of switch to Bootstrap for RMA"
    )
    old_serial: str = Field(
        ...,
        alias="oldSerial",
        min_length=1,
        description="Serial number of switch to be replaced by RMA"
    )
    model: str = Field(
        ...,
        min_length=1,
        description="Model of switch to Bootstrap for RMA"
    )
    version: str = Field(
        ...,
        min_length=1,
        description="Software version of switch to Bootstrap for RMA"
    )

    # Optional fields
    image_policy: Optional[str] = Field(
        default=None,
        alias="imagePolicy",
        description="Name of the image policy to be applied on switch during Bootstrap for RMA"
    )

    # Required config data for RMA (models list + gateway)
    config_data: ConfigDataModel = Field(
        ...,
        alias="configData",
        description=(
            "Basic config data of switch to Bootstrap for RMA. "
            "'models' (list of module models) and 'gateway' (IP with mask) are mandatory."
        ),
    )

    @field_validator('serial_number', 'old_serial', mode='before')
    @classmethod
    def validate_serial_numbers(cls, v: str) -> str:
        """Validate serial numbers are not empty."""
        # Runs before pydantic's own min_length check; strips whitespace.
        if not v or not v.strip():
            raise ValueError("Serial number cannot be empty")
        return v.strip()

    @model_validator(mode='after')
    def validate_discovery_credentials_pair(self) -> Self:
        """Validate that discovery_username and discovery_password are both set or both absent.

        Mirrors the dcnm_inventory.py bidirectional check (intentionally the
        same logic as POAPConfigModel.validate_discovery_credentials_pair):
        - discovery_username set → discovery_password required
        - discovery_password set → discovery_username required
        """
        has_user = bool(self.discovery_username)
        has_pass = bool(self.discovery_password)
        if has_user and not has_pass:
            raise ValueError(
                "discovery_password must be set when discovery_username is specified"
            )
        if has_pass and not has_user:
            raise ValueError(
                "discovery_username must be set when discovery_password is specified"
            )
        return self
+ + Mirrors the dcnm_inventory.py bidirectional check: + - discovery_username set → discovery_password required + - discovery_password set → discovery_username required + """ + has_user = bool(self.discovery_username) + has_pass = bool(self.discovery_password) + if has_user and not has_pass: + raise ValueError( + "discovery_password must be set when discovery_username is specified" + ) + if has_pass and not has_user: + raise ValueError( + "discovery_username must be set when discovery_password is specified" + ) + return self + + +class SwitchConfigModel(NDBaseModel): + """ + Per-switch configuration entry in the Ansible playbook config list. + + Supports normal switch addition, POAP (Bootstrap and Pre-provision), and RMA + operations. The operation type is derived from the presence of poap or rma fields. + """ + identifiers: ClassVar[List[str]] = ["seed_ip"] + + # Fields excluded from diff — only seed_ip + role are compared + exclude_from_diff: ClassVar[List[str]] = [ + "user_name", "password", "auth_proto", "max_hops", + "preserve_config", "platform_type", "poap", "rma", + "operation_type", + "switch_id", "serial_number", "mode", "hostname", + "model", "software_version", + ] + + # Required fields + seed_ip: str = Field( + ..., + alias="seedIp", + min_length=1, + description="Seed IP address or DNS name of the switch" + ) + + # Optional fields — required for merged/overridden, optional for query/deleted + user_name: Optional[str] = Field( + default=None, + alias="userName", + description="Login username to the switch (required for merged/overridden states)" + ) + password: Optional[str] = Field( + default=None, + description="Login password to the switch (required for merged/overridden states)" + ) + + # Optional fields with defaults + auth_proto: SnmpV3AuthProtocol = Field( + default=SnmpV3AuthProtocol.MD5, + alias="authProto", + description="Authentication protocol to use" + ) + max_hops: int = Field( + default=0, + alias="maxHops", + ge=0, + le=7, + 
description="Maximum hops to reach the switch (deprecated, defaults to 0)" + ) + role: Optional[SwitchRole] = Field( + default=None, + description="Role to assign to the switch. None means not specified (uses controller default)." + ) + preserve_config: bool = Field( + default=False, + alias="preserveConfig", + description="Set to false for greenfield, true for brownfield deployment" + ) + platform_type: PlatformType = Field( + default=PlatformType.NX_OS, + alias="platformType", + description="Platform type of the switch (nx-os, ios-xe, etc.)" + ) + + # POAP and RMA configurations + poap: Optional[List[POAPConfigModel]] = Field( + default=None, + description="POAP (PowerOn Auto Provisioning) configurations for Bootstrap/Pre-provision" + ) + rma: Optional[List[RMAConfigModel]] = Field( + default=None, + description="RMA (Return Material Authorization) configurations for switch replacement" + ) + + # Computed fields + + @computed_field + @property + def operation_type(self) -> Literal["normal", "poap", "rma"]: + """Determine the operation type from this config. + + Returns: + ``'poap'`` if POAP configs are present, + ``'rma'`` if RMA configs are present, + ``'normal'`` otherwise. 
+ """ + if self.poap: + return "poap" + if self.rma: + return "rma" + return "normal" + + # API-derived fields (populated by from_response, never set by users) + switch_id: Optional[str] = Field( + default=None, + alias="switchId", + description="Serial number / switch ID from inventory API" + ) + serial_number: Optional[str] = Field( + default=None, + alias="serialNumber", + description="Serial number from inventory API" + ) + mode: Optional[str] = Field( + default=None, + description="Switch mode from inventory API (Normal, Migration, etc.)" + ) + hostname: Optional[str] = Field( + default=None, + description="Switch hostname from inventory API" + ) + model: Optional[str] = Field( + default=None, + description="Switch model from inventory API" + ) + software_version: Optional[str] = Field( + default=None, + alias="softwareVersion", + description="Software version from inventory API" + ) + + @model_validator(mode='before') + @classmethod + def reject_auth_proto_for_poap_rma(cls, data: Any) -> Any: + """Reject non-MD5 auth_proto when POAP or RMA is configured. + + POAP, Pre-provision, and RMA operations always use MD5 internally. + If the user explicitly supplies a non-MD5 ``auth_proto`` (or + ``authProto``) alongside ``poap`` or ``rma``, raise an error so + they know the field is not user-configurable for these operation + types. + + Note: Ansible argspec injects the default ``"MD5"`` even when the + user omits ``auth_proto``, so we must allow MD5 through. 
+ """ + if not isinstance(data, dict): + return data + + has_poap = bool(data.get("poap")) + has_rma = bool(data.get("rma")) + + if has_poap or has_rma: + # Check both snake_case (Ansible playbook) and camelCase (API) keys + auth_val = data.get("auth_proto") or data.get("authProto") + if auth_val is not None: + # Normalize to lowercase for comparison + normalized = str(auth_val).strip().lower() + if normalized not in ("md5", ""): + op = "POAP" if has_poap else "RMA" + raise ValueError( + f"'auth_proto' must not be specified for {op} operations. " + f"The authentication protocol is always MD5 and is set " + f"automatically. Received: '{auth_val}'" + ) + + return data + + @model_validator(mode='after') + def validate_poap_rma_mutual_exclusion(self) -> Self: + """Validate that POAP and RMA are mutually exclusive.""" + if self.poap and self.rma: + raise ValueError("Cannot specify both 'poap' and 'rma' configurations for the same switch") + + return self + + @model_validator(mode='after') + def validate_poap_rma_credentials(self) -> Self: + """Validate credentials for POAP and RMA operations.""" + if self.poap or self.rma: + # POAP/RMA require credentials + if not self.user_name or not self.password: + raise ValueError( + "For POAP and RMA operations, user_name and password are required" + ) + # For POAP and RMA, username should be 'admin' + if self.user_name != "admin": + raise ValueError("For POAP and RMA operations, user_name should be 'admin'") + + return self + + @model_validator(mode='after') + def apply_state_defaults(self, info: ValidationInfo) -> Self: + """Apply state-aware defaults and enforcement using validation context. + + When ``context={"state": "merged"}`` (or ``"overridden"``) is passed + to ``model_validate()``, the model: + - Defaults ``role`` to ``SwitchRole.LEAF`` when not specified. + - Enforces that ``user_name`` and ``password`` are provided. + + For ``query`` / ``deleted`` (or no context), fields remain as-is. 
+ """ + state = (info.context or {}).get("state") if info else None + + # POAP only allowed with merged or query + if self.poap and state not in (None, "merged", "query"): + raise ValueError( + f"POAP operations require 'merged' or 'query' state, " + f"got '{state}' (switch: {self.seed_ip})" + ) + + # RMA only allowed with merged + if self.rma and state not in (None, "merged"): + raise ValueError( + f"RMA operations require 'merged' state, " + f"got '{state}' (switch: {self.seed_ip})" + ) + + if state in ("merged", "overridden"): + if self.role is None: + self.role = SwitchRole.LEAF + if not self.user_name or not self.password: + raise ValueError( + f"user_name and password are required " + f"for '{state}' state " + f"(switch: {self.seed_ip})" + ) + return self + + @field_validator('seed_ip', mode='before') + @classmethod + def validate_seed_ip(cls, v: str) -> str: + """Validate seed IP is valid IP address or DNS name.""" + if not v or not v.strip(): + raise ValueError("seed_ip cannot be empty") + + v = v.strip() + + # Try to validate as IP address first + try: + ip_address(v) + return v + except ValueError: + pass + + # If not an IP, assume it's a DNS name - basic validation + if not v.replace('-', '').replace('.', '').replace('_', '').isalnum(): + raise ValueError(f"Invalid seed_ip: {v}. 
Must be a valid IP address or DNS name") + + return v + + @field_validator('poap', 'rma', mode='before') + @classmethod + def validate_lists_not_empty(cls, v: Optional[List]) -> Optional[List]: + """Validate that if POAP or RMA lists are provided, they are not empty.""" + if v is not None and len(v) == 0: + raise ValueError("POAP/RMA list cannot be empty if provided") + return v + + @field_validator('auth_proto', mode='before') + @classmethod + def normalize_auth_proto(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + """Normalize auth_proto to handle case-insensitive input (MD5, md5, etc.).""" + return SnmpV3AuthProtocol.normalize(v) + + @field_validator('role', mode='before') + @classmethod + def normalize_role(cls, v: Union[str, SwitchRole, None]) -> Optional[SwitchRole]: + """Normalize role for case-insensitive and underscore-to-camelCase matching. + Returns None when not specified (distinguishes from explicit 'leaf').""" + if v is None: + return None + return SwitchRole.normalize(v) + + @field_validator('platform_type', mode='before') + @classmethod + def normalize_platform_type(cls, v: Union[str, PlatformType, None]) -> PlatformType: + """Normalize platform_type for case-insensitive matching (NX_OS, nx-os, etc.).""" + return PlatformType.normalize(v) + + @classmethod + def validate_no_mixed_operations( + cls, configs: List["SwitchConfigModel"] + ) -> None: + """Validate that a list of configs does not mix operation types. + + POAP, RMA, and normal switch operations cannot be combined + in the same Ansible task. Call this after validating all + individual configs. + + Args: + configs: List of validated SwitchConfigModel instances. + + Raises: + ValueError: If more than one operation type is present. + """ + op_types = {cfg.operation_type for cfg in configs} + if len(op_types) > 1: + raise ValueError( + "Mixed operation types detected: " + f"{', '.join(sorted(op_types))}. 
" + "POAP, RMA, and normal switch operations " + "cannot be mixed in the same task. " + "Please separate them into different tasks." + ) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format. + + Excludes API-derived fields that are not part of the user config. + """ + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude={ + "switch_id", "serial_number", "mode", + "hostname", "model", "software_version", + }, + ) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> Self: + """Create model instance from inventory or discovery API response. + + Handles two formats: + 1. Inventory API: {switchId, fabricManagementIp, switchRole, ...} + 2. Discovery API: {serialNumber, ip, hostname, ...} + """ + mapped: Dict[str, Any] = {} + + # seed_ip from fabricManagementIp (inventory) or ip (discovery) + ip = response.get("fabricManagementIp") or response.get("ip") + if ip: + mapped["seedIp"] = ip + + # role from switchRole + role = response.get("switchRole") + if role: + mapped["role"] = role + + # Direct API fields + direct_fields = ( + "switchId", "serialNumber", "softwareVersion", + "mode", "hostname", "model", + ) + for key in direct_fields: + if key in response and response[key] is not None: + mapped[key] = response[key] + + return cls.model_validate(mapped) + + +__all__ = [ + "ConfigDataModel", + "POAPConfigModel", + "RMAConfigModel", + "SwitchConfigModel", +] diff --git a/plugins/module_utils/models/nd_manage_switches/discovery_models.py b/plugins/module_utils/models/nd_manage_switches/discovery_models.py new file mode 100644 index 00000000..4e6fb667 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/discovery_models.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Switch discovery models for shallow discovery and fabric add operations. 
+ +Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from pydantic import Field, field_validator +from typing import Any, Dict, List, Optional, ClassVar, Literal, Union +from typing_extensions import Self + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +from .enums import ( + PlatformType, + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, +) +from .validators import SwitchValidators + + +class ShallowDiscoveryRequestModel(NDBaseModel): + """ + Initiates a shallow CDP/LLDP-based discovery from one or more seed IP addresses. + + Path: POST /fabrics/{fabricName}/actions/shallowDiscovery + """ + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + exclude_from_diff: ClassVar[List[str]] = ["password"] + seed_ip_collection: List[str] = Field( + ..., + alias="seedIpCollection", + min_length=1, + description="Seed switch IP collection" + ) + max_hop: int = Field( + default=2, + alias="maxHop", + ge=0, + le=7, + description="Max hop" + ) + platform_type: PlatformType = Field( + default=PlatformType.NX_OS, + alias="platformType", + description="Switch platform type" + ) + snmp_v3_auth_protocol: SnmpV3AuthProtocol = Field( + default=SnmpV3AuthProtocol.MD5, + alias="snmpV3AuthProtocol", + description="SNMPv3 authentication protocols" + ) + username: Optional[str] = Field( + default=None, + description="User name for switch login" + ) + password: Optional[str] = Field( + default=None, + description="User password for switch login" + ) + remote_credential_store: Optional[RemoteCredentialStore] = Field( + default=None, + alias="remoteCredentialStore", + description="Type of credential store" + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + 
alias="remoteCredentialStoreKey", + description="Remote credential store key" + ) + + @field_validator('seed_ip_collection', mode='before') + @classmethod + def validate_seed_ips(cls, v: List[str]) -> List[str]: + """Validate all seed IPs.""" + if not v: + raise ValueError("At least one seed IP is required") + validated = [] + for ip in v: + result = SwitchValidators.validate_ip_address(ip) + if result: + validated.append(result) + if not validated: + raise ValueError("No valid seed IPs provided") + return validated + + @field_validator('snmp_v3_auth_protocol', mode='before') + @classmethod + def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + """Normalize SNMP auth protocol (case-insensitive).""" + return SnmpV3AuthProtocol.normalize(v) + + @field_validator('platform_type', mode='before') + @classmethod + def normalize_platform(cls, v: Union[str, PlatformType, None]) -> PlatformType: + """Normalize platform type (case-insensitive).""" + return PlatformType.normalize(v) + + +class SwitchDiscoveryModel(NDBaseModel): + """ + Discovery data for a single switch returned by the shallow discovery API. + + For N7K user VDC deployments, the serial number format is serialNumber:vDCName. + """ + identifiers: ClassVar[List[str]] = ["serial_number"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + hostname: str = Field( + ..., + description="Switch host name" + ) + ip: str = Field( + ..., + description="Switch IPv4/v6 address" + ) + serial_number: str = Field( + ..., + alias="serialNumber", + description="Switch serial number" + ) + model: str = Field( + ..., + description="Switch model" + ) + software_version: Optional[str] = Field( + default=None, + alias="softwareVersion", + description="Switch software version" + ) + vdc_id: Optional[int] = Field( + default=None, + alias="vdcId", + ge=0, + description="N7K VDC ID. 
Mandatory for N7K switch discovery" + ) + vdc_mac: Optional[str] = Field( + default=None, + alias="vdcMac", + description="N7K VDC Mac address. Mandatory for N7K switch discovery" + ) + switch_role: Optional[SwitchRole] = Field( + default=None, + alias="switchRole", + description="Switch role" + ) + + @field_validator('hostname', mode='before') + @classmethod + def validate_host(cls, v: str) -> str: + result = SwitchValidators.validate_hostname(v) + if result is None: + raise ValueError("hostname cannot be empty") + return result + + @field_validator('ip', mode='before') + @classmethod + def validate_ip(cls, v: str) -> str: + result = SwitchValidators.validate_ip_address(v) + if result is None: + raise ValueError("ip cannot be empty") + return result + + @field_validator('serial_number', mode='before') + @classmethod + def validate_serial(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("serial_number cannot be empty") + return result + + @field_validator('vdc_mac', mode='before') + @classmethod + def validate_mac(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_mac_address(v) + + +class AddSwitchesRequestModel(NDBaseModel): + """ + Imports one or more previously discovered switches into a fabric. 
+ + Path: POST /fabrics/{fabricName}/switches + """ + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + exclude_from_diff: ClassVar[List[str]] = ["password"] + switches: List[SwitchDiscoveryModel] = Field( + ..., + min_length=1, + description="The list of switches to be imported" + ) + platform_type: PlatformType = Field( + default=PlatformType.NX_OS, + alias="platformType", + description="Switch platform type" + ) + preserve_config: bool = Field( + default=True, + alias="preserveConfig", + description="Flag to preserve the switch configuration after import" + ) + snmp_v3_auth_protocol: SnmpV3AuthProtocol = Field( + default=SnmpV3AuthProtocol.MD5, + alias="snmpV3AuthProtocol", + description="SNMPv3 authentication protocols" + ) + use_credential_for_write: Optional[bool] = Field( + default=None, + alias="useCredentialForWrite", + description="Flag to use the discovery credential as LAN credential" + ) + username: Optional[str] = Field( + default=None, + description="User name for switch login" + ) + password: Optional[str] = Field( + default=None, + description="User password for switch login" + ) + remote_credential_store: Optional[RemoteCredentialStore] = Field( + default=None, + alias="remoteCredentialStore", + description="Type of credential store" + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key" + ) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + payload = self.model_dump(by_alias=True, exclude_none=True) + # Convert nested switches to payload format + if 'switches' in payload: + payload['switches'] = [ + s.to_payload() if hasattr(s, 'to_payload') else s + for s in self.switches + ] + return payload + + @field_validator('snmp_v3_auth_protocol', mode='before') + @classmethod + def normalize_snmp_auth(cls, v: 
Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + """Normalize SNMP auth protocol (case-insensitive: MD5, md5, etc.).""" + return SnmpV3AuthProtocol.normalize(v) + + @field_validator('platform_type', mode='before') + @classmethod + def normalize_platform_type(cls, v: Union[str, PlatformType, None]) -> PlatformType: + """Normalize platform type (case-insensitive: NX_OS, nx-os, etc.).""" + return PlatformType.normalize(v) + + +__all__ = [ + "ShallowDiscoveryRequestModel", + "SwitchDiscoveryModel", + "AddSwitchesRequestModel", +] diff --git a/plugins/module_utils/models/nd_manage_switches/enums.py b/plugins/module_utils/models/nd_manage_switches/enums.py new file mode 100644 index 00000000..93f93083 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/enums.py @@ -0,0 +1,320 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Enumerations for Switch and Inventory Operations. + +Extracted from OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from enum import Enum +from typing import List, Union + + +# ============================================================================= +# ENUMS - Extracted from OpenAPI Schema components/schemas +# ============================================================================= + +class SwitchRole(str, Enum): + """ + Switch role enumeration. 
+ + Based on: components/schemas/switchRole + Description: The role of the switch, meta is a read-only switch role + """ + BORDER = "border" + BORDER_GATEWAY = "borderGateway" + BORDER_GATEWAY_SPINE = "borderGatewaySpine" + BORDER_GATEWAY_SUPER_SPINE = "borderGatewaySuperSpine" + BORDER_SPINE = "borderSpine" + BORDER_SUPER_SPINE = "borderSuperSpine" + LEAF = "leaf" + SPINE = "spine" + SUPER_SPINE = "superSpine" + TIER2_LEAF = "tier2Leaf" + TOR = "tor" + ACCESS = "access" + AGGREGATION = "aggregation" + CORE_ROUTER = "coreRouter" + EDGE_ROUTER = "edgeRouter" + META = "meta" # read-only + NEIGHBOR = "neighbor" + + @classmethod + def choices(cls) -> List[str]: + """Return list of valid choices.""" + return [e.value for e in cls] + + @classmethod + def from_user_input(cls, value: str) -> "SwitchRole": + """ + Convert user-friendly input to enum value. + Accepts underscore-separated values like 'border_gateway' -> 'borderGateway' + """ + if not value: + return cls.LEAF + # Try direct match first + try: + return cls(value) + except ValueError: + pass + # Try converting underscore to camelCase + parts = value.lower().split('_') + camel_case = parts[0] + ''.join(word.capitalize() for word in parts[1:]) + try: + return cls(camel_case) + except ValueError: + raise ValueError(f"Invalid switch role: {value}. Valid options: {cls.choices()}") + + @classmethod + def normalize(cls, value: Union[str, "SwitchRole", None]) -> "SwitchRole": + """ + Normalize input to enum value (case-insensitive). + Accepts: LEAF, leaf, border_gateway, borderGateway, etc. 
+ """ + if value is None: + return cls.LEAF + if isinstance(value, cls): + return value + if isinstance(value, str): + v_lower = value.lower() + # Try direct match with lowercase + for role in cls: + if role.value.lower() == v_lower: + return role + # Try converting underscore to camelCase + parts = v_lower.split('_') + if len(parts) > 1: + camel_case = parts[0] + ''.join(word.capitalize() for word in parts[1:]) + for role in cls: + if role.value == camel_case: + return role + raise ValueError(f"Invalid SwitchRole: {value}. Valid: {cls.choices()}") + + +class SystemMode(str, Enum): + """ + System mode enumeration. + + Based on: components/schemas/systemMode + """ + NORMAL = "normal" + MAINTENANCE = "maintenance" + MIGRATION = "migration" + INCONSISTENT = "inconsistent" + WAITING = "waiting" + NOT_APPLICABLE = "notApplicable" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class PlatformType(str, Enum): + """ + Switch platform type enumeration. + + Based on: components/schemas (multiple references) + """ + NX_OS = "nx-os" + OTHER = "other" + IOS_XE = "ios-xe" + IOS_XR = "ios-xr" + SONIC = "sonic" + APIC = "apic" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + @classmethod + def normalize(cls, value: Union[str, "PlatformType", None]) -> "PlatformType": + """ + Normalize input to enum value (case-insensitive). + Accepts: NX_OS, nx-os, NX-OS, ios_xe, ios-xe, etc. + """ + if value is None: + return cls.NX_OS + if isinstance(value, cls): + return value + if isinstance(value, str): + v_normalized = value.lower().replace('_', '-') + for pt in cls: + if pt.value == v_normalized: + return pt + raise ValueError(f"Invalid PlatformType: {value}. Valid: {cls.choices()}") + + +class SnmpV3AuthProtocol(str, Enum): + """ + SNMPv3 authentication protocols. 
+ + Based on: components/schemas/snmpV3AuthProtocol and schemas-snmpV3AuthProtocol + """ + MD5 = "md5" + SHA = "sha" + MD5_DES = "md5-des" + MD5_AES = "md5-aes" + SHA_AES = "sha-aes" + SHA_DES = "sha-des" + SHA_AES_256 = "sha-aes-256" + SHA_224 = "sha-224" + SHA_224_AES = "sha-224-aes" + SHA_224_AES_256 = "sha-224-aes-256" + SHA_256 = "sha-256" + SHA_256_AES = "sha-256-aes" + SHA_256_AES_256 = "sha-256-aes-256" + SHA_384 = "sha-384" + SHA_384_AES = "sha-384-aes" + SHA_384_AES_256 = "sha-384-aes-256" + SHA_512 = "sha-512" + SHA_512_AES = "sha-512-aes" + SHA_512_AES_256 = "sha-512-aes-256" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + @classmethod + def normalize(cls, value: Union[str, "SnmpV3AuthProtocol", None]) -> "SnmpV3AuthProtocol": + """ + Normalize input to enum value (case-insensitive). + Accepts: MD5, md5, MD5_DES, md5-des, etc. + """ + if value is None: + return cls.MD5 + if isinstance(value, cls): + return value + if isinstance(value, str): + v_normalized = value.lower().replace('_', '-') + for proto in cls: + if proto.value == v_normalized: + return proto + raise ValueError(f"Invalid SnmpV3AuthProtocol: {value}. Valid: {cls.choices()}") + + +class DiscoveryStatus(str, Enum): + """ + Switch discovery status. + + Based on: components/schemas/additionalSwitchData.discoveryStatus + """ + OK = "ok" + DISCOVERING = "discovering" + REDISCOVERING = "rediscovering" + DEVICE_SHUTTING_DOWN = "deviceShuttingDown" + UNREACHABLE = "unreachable" + IP_ADDRESS_CHANGE = "ipAddressChange" + DISCOVERY_TIMEOUT = "discoveryTimeout" + RETRYING = "retrying" + SSH_SESSION_ERROR = "sshSessionError" + TIMEOUT = "timeout" + UNKNOWN_USER_PASSWORD = "unknownUserPassword" + CONNECTION_ERROR = "connectionError" + NOT_APPLICABLE = "notApplicable" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class ConfigSyncStatus(str, Enum): + """ + Configuration sync status. 
+ + Based on: components/schemas/switchConfigSyncStatus + """ + DEPLOYED = "deployed" + DEPLOYMENT_IN_PROGRESS = "deploymentInProgress" + FAILED = "failed" + IN_PROGRESS = "inProgress" + IN_SYNC = "inSync" + NOT_APPLICABLE = "notApplicable" + OUT_OF_SYNC = "outOfSync" + PENDING = "pending" + PREVIEW_IN_PROGRESS = "previewInProgress" + SUCCESS = "success" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class VpcRole(str, Enum): + """ + VPC role enumeration. + + Based on: components/schemas/schemas-vpcRole + """ + PRIMARY = "primary" + SECONDARY = "secondary" + OPERATIONAL_PRIMARY = "operationalPrimary" + OPERATIONAL_SECONDARY = "operationalSecondary" + NONE_ESTABLISHED = "noneEstablished" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class RemoteCredentialStore(str, Enum): + """ + Remote credential store type. + + Based on: components/schemas/remoteCredentialStore + """ + LOCAL = "local" + CYBERARK = "cyberark" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class AnomalyLevel(str, Enum): + """ + Anomaly level classification. + """ + CRITICAL = "critical" + MAJOR = "major" + MINOR = "minor" + WARNING = "warning" + INFO = "info" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class AdvisoryLevel(str, Enum): + """ + Advisory level classification. 
+ """ + CRITICAL = "critical" + MAJOR = "major" + MINOR = "minor" + NONE = "none" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +__all__ = [ + "SwitchRole", + "SystemMode", + "PlatformType", + "SnmpV3AuthProtocol", + "DiscoveryStatus", + "ConfigSyncStatus", + "VpcRole", + "RemoteCredentialStore", + "AnomalyLevel", + "AdvisoryLevel", +] diff --git a/plugins/module_utils/models/nd_manage_switches/preprovision_models.py b/plugins/module_utils/models/nd_manage_switches/preprovision_models.py new file mode 100644 index 00000000..1cd8b8a0 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/preprovision_models.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Pre-provision switch models. + +Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ipaddress import ip_network +from pydantic import Field, field_validator, model_validator +from typing import Any, Dict, List, Optional, ClassVar, Literal +from typing_extensions import Self + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +from .enums import ( + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, +) +from .validators import SwitchValidators + + +class PreProvisionSwitchModel(NDBaseModel): + """ + Request payload for pre-provisioning a single switch in the fabric. 
+ + Path: POST /fabrics/{fabricName}/switchActions/preProvision + """ + + identifiers: ClassVar[List[str]] = ["serial_number"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] + + # --- preProvisionSpecific fields (required) --- + serial_number: str = Field( + ..., + alias="serialNumber", + description="Serial number of the switch to pre-provision", + ) + hostname: str = Field( + ..., + description="Hostname of the switch to pre-provision", + ) + ip: str = Field( + ..., + description="IP address of the switch to pre-provision", + ) + + # --- preProvisionSpecific fields (optional) --- + dhcp_bootstrap_ip: Optional[str] = Field( + default=None, + alias="dhcpBootstrapIp", + description="Used for device day-0 bring-up when using inband reachability", + ) + seed_switch: bool = Field( + default=False, + alias="seedSwitch", + description="Use as seed switch", + ) + + # --- bootstrapBase fields (required) --- + model: str = Field( + ..., + description="Model of the switch to pre-provision", + ) + software_version: str = Field( + ..., + alias="softwareVersion", + description="Software version of the switch to pre-provision", + ) + gateway_ip_mask: str = Field( + ..., + alias="gatewayIpMask", + description="Gateway IP address with mask (e.g., 10.23.244.1/24)", + ) + + # --- bootstrapBase fields (optional) --- + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy associated with the switch during pre-provision", + ) + switch_role: Optional[SwitchRole] = Field( + default=None, + alias="switchRole", + description="Role to assign to the switch", + ) + data: Optional[Dict[str, Any]] = Field( + default=None, + description="Pre-provision configuration data block (gatewayIpMask, models)", + ) + + # --- bootstrapCredential fields (required) --- + password: str = Field( + ..., + 
description="Switch password to be set during pre-provision for admin user", + ) + discovery_auth_protocol: SnmpV3AuthProtocol = Field( + ..., + alias="discoveryAuthProtocol", + description="SNMP authentication protocol for discovery", + ) + + # --- bootstrapCredential fields (optional) --- + use_new_credentials: bool = Field( + default=False, + alias="useNewCredentials", + description=( + "If True, use discoveryUsername and discoveryPassword for local " + "remoteCredentialStore or use remoteCredentialStoreKey for CyberArk" + ), + ) + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername", + description="Username for switch discovery post pre-provision", + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password for switch discovery post pre-provision", + ) + remote_credential_store: Optional[RemoteCredentialStore] = Field( + default=None, + alias="remoteCredentialStore", + description="Type of credential store for discovery credentials", + ) + + # --- Validators --- + + @field_validator("ip", "dhcp_bootstrap_ip", mode="before") + @classmethod + def validate_ip(cls, v: Optional[str]) -> Optional[str]: + if v is None: + return None + result = SwitchValidators.validate_ip_address(v) + if result is None: + raise ValueError(f"Invalid IP address: {v}") + return result + + @field_validator("hostname", mode="before") + @classmethod + def validate_host(cls, v: str) -> str: + result = SwitchValidators.validate_hostname(v) + if result is None: + raise ValueError("hostname cannot be empty") + return result + + @field_validator("serial_number", mode="before") + @classmethod + def validate_serial(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("serial_number cannot be empty") + return result + + @field_validator("gateway_ip_mask", mode="before") + @classmethod + def validate_gateway(cls, v: str) -> str: + if not v or "/" 
not in v: + raise ValueError( + "gatewayIpMask must include subnet mask (e.g., 10.23.244.1/24)" + ) + try: + ip_network(v, strict=False) + except Exception as exc: + raise ValueError(f"Invalid gatewayIpMask: {v}") from exc + return v + + @model_validator(mode='after') + def derive_use_new_credentials(self) -> Self: + """Auto-set useNewCredentials when both discoveryUsername and discoveryPassword are provided.""" + self.use_new_credentials = bool(self.discovery_username and self.discovery_password) + return self + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format matching preProvision spec.""" + return self.model_dump(by_alias=True, exclude_none=True) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> Self: + """Create model instance from API response.""" + return cls.model_validate(response) + + +class PreProvisionSwitchesRequestModel(NDBaseModel): + """ + Request body wrapping a list of pre-provision payloads for bulk switch pre-provisioning. 
+ + Path: POST /fabrics/{fabricName}/switchActions/preProvision + """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + switches: List[PreProvisionSwitchModel] = Field( + ..., + description="PowerOn Auto Provisioning switches", + ) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + return { + "switches": [s.to_payload() for s in self.switches] + } + + +__all__ = [ + "PreProvisionSwitchModel", + "PreProvisionSwitchesRequestModel", +] diff --git a/plugins/module_utils/models/nd_manage_switches/rma_models.py b/plugins/module_utils/models/nd_manage_switches/rma_models.py new file mode 100644 index 00000000..1f5be8b5 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/rma_models.py @@ -0,0 +1,258 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""RMA (Return Material Authorization) switch models. + +Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from pydantic import Field, computed_field, field_validator, model_validator +from typing import Any, Dict, List, Optional, ClassVar, Literal +from typing_extensions import Self + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +from .enums import ( + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, +) +from .validators import SwitchValidators + + +class RMASpecificModel(NDBaseModel): + """ + Replacement-switch-specific fields used in an RMA bootstrap operation. 
+ """ + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + hostname: str = Field( + ..., + description="Hostname of the switch" + ) + ip: str = Field( + ..., + description="IP address of the switch" + ) + new_switch_id: str = Field( + ..., + alias="newSwitchId", + description="SwitchId (serial number) of the switch" + ) + public_key: str = Field( + ..., + alias="publicKey", + description="Public Key" + ) + finger_print: str = Field( + ..., + alias="fingerPrint", + description="Fingerprint" + ) + dhcp_bootstrap_ip: Optional[str] = Field( + default=None, + alias="dhcpBootstrapIp", + description="This is used for device day-0 bring-up when using inband reachability" + ) + seed_switch: bool = Field( + default=False, + alias="seedSwitch", + description="Use as seed switch" + ) + + @field_validator('hostname', mode='before') + @classmethod + def validate_host(cls, v: str) -> str: + result = SwitchValidators.validate_hostname(v) + if result is None: + raise ValueError("hostname cannot be empty") + return result + + @field_validator('ip', 'dhcp_bootstrap_ip', mode='before') + @classmethod + def validate_ip(cls, v: Optional[str]) -> Optional[str]: + if v is None: + return None + return SwitchValidators.validate_ip_address(v) + + @field_validator('new_switch_id', mode='before') + @classmethod + def validate_serial(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("new_switch_id cannot be empty") + return result + + +class RMASwitchModel(NDBaseModel): + """ + Request payload for provisioning a replacement (RMA) switch via bootstrap. 
+ + Path: POST /fabrics/{fabricName}/switches/{switchId}/actions/provisionRMA + """ + identifiers: ClassVar[List[str]] = ["new_switch_id"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] + # From bootstrapBase + gateway_ip_mask: str = Field( + ..., + alias="gatewayIpMask", + description="Gateway IP address with mask" + ) + model: str = Field( + ..., + description="Model of the bootstrap switch" + ) + software_version: str = Field( + ..., + alias="softwareVersion", + description="Software version of the bootstrap switch" + ) + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy associated with the switch during bootstrap" + ) + switch_role: Optional[SwitchRole] = Field( + default=None, + alias="switchRole" + ) + + # From bootstrapCredential + password: str = Field( + ..., + description="Switch password to be set during bootstrap for admin user" + ) + discovery_auth_protocol: SnmpV3AuthProtocol = Field( + ..., + alias="discoveryAuthProtocol" + ) + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername" + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword" + ) + remote_credential_store: RemoteCredentialStore = Field( + default=RemoteCredentialStore.LOCAL, + alias="remoteCredentialStore" + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey" + ) + + # From RMASpecific + hostname: str = Field( + ..., + description="Hostname of the switch" + ) + ip: str = Field( + ..., + description="IP address of the switch" + ) + new_switch_id: str = Field( + ..., + alias="newSwitchId", + description="SwitchId (serial number) of the switch" + ) + public_key: str = Field( + ..., + alias="publicKey", + description="Public Key" + ) + finger_print: str = Field( + ..., + 
alias="fingerPrint", + description="Fingerprint" + ) + dhcp_bootstrap_ip: Optional[str] = Field( + default=None, + alias="dhcpBootstrapIp" + ) + seed_switch: bool = Field( + default=False, + alias="seedSwitch" + ) + + @field_validator('gateway_ip_mask', mode='before') + @classmethod + def validate_gateway(cls, v: str) -> str: + result = SwitchValidators.validate_cidr(v) + if result is None: + raise ValueError("gateway_ip_mask cannot be empty") + return result + + @field_validator('hostname', mode='before') + @classmethod + def validate_host(cls, v: str) -> str: + result = SwitchValidators.validate_hostname(v) + if result is None: + raise ValueError("hostname cannot be empty") + return result + + @field_validator('ip', 'dhcp_bootstrap_ip', mode='before') + @classmethod + def validate_ip(cls, v: Optional[str]) -> Optional[str]: + if v is None: + return None + result = SwitchValidators.validate_ip_address(v) + if v is not None and result is None: + raise ValueError(f"Invalid IP address: {v}") + return result + + @field_validator('new_switch_id', mode='before') + @classmethod + def validate_serial(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("new_switch_id cannot be empty") + return result + + @computed_field(alias="useNewCredentials") + @property + def use_new_credentials(self) -> bool: + """Derive useNewCredentials from discoveryUsername and discoveryPassword.""" + return bool(self.discovery_username and self.discovery_password) + + @model_validator(mode='after') + def validate_rma_credentials(self) -> Self: + """Validate RMA credential configuration logic.""" + if self.use_new_credentials: + if self.remote_credential_store == RemoteCredentialStore.CYBERARK: + if not self.remote_credential_store_key: + raise ValueError( + "remote_credential_store_key is required when " + "remote_credential_store is 'cyberark'" + ) + elif self.remote_credential_store == RemoteCredentialStore.LOCAL: + if not 
"""Switch action models (serial number change, IDs list, credentials).

Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332.
"""

from __future__ import absolute_import, division, print_function

__metaclass__ = type

from pydantic import Field, field_validator, model_validator
from typing import Any, Dict, List, Literal, Optional, ClassVar
from typing_extensions import Self

from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel

from .validators import SwitchValidators


class SwitchCredentialsRequestModel(NDBaseModel):
    """
    Request body to save LAN credentials for one or more fabric switches.

    Supports local credentials or remote credential store (such as CyberArk).

    Path: POST /api/v1/manage/credentials/switches
    """
    # Request body, not an addressable resource: no identifier fields.
    identifiers: ClassVar[List[str]] = []
    identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton"

    switch_ids: List[str] = Field(
        ...,
        alias="switchIds",
        min_length=1,
        description="List of switch serial numbers"
    )
    switch_username: Optional[str] = Field(
        default=None,
        alias="switchUsername",
        description="Switch username"
    )
    switch_password: Optional[str] = Field(
        default=None,
        alias="switchPassword",
        description="Switch password"
    )
    remote_credential_store_key: Optional[str] = Field(
        default=None,
        alias="remoteCredentialStoreKey",
        description="Remote credential store key (e.g. CyberArk path)"
    )
    remote_credential_store_type: Optional[str] = Field(
        default=None,
        alias="remoteCredentialStoreType",
        description="Remote credential store type (e.g. 'cyberark')"
    )

    @field_validator('switch_ids', mode='before')
    @classmethod
    def validate_switch_ids(cls, v: List[str]) -> List[str]:
        """Validate all switch IDs.

        Raises:
            ValueError: If the list is empty, or if no entry survives
                serial-number validation.
        """
        if not v:
            raise ValueError("At least one switch ID is required")
        validated = []
        for serial in v:
            result = SwitchValidators.validate_serial_number(serial)
            # NOTE: blank/empty entries are silently dropped here —
            # validate_serial_number returns None for them; only malformed
            # non-empty serial numbers raise.
            if result:
                validated.append(result)
        if not validated:
            raise ValueError("No valid switch IDs provided")
        return validated

    @model_validator(mode='after')
    def validate_credentials(self) -> Self:
        """Ensure either local or remote credentials are provided.

        Local = switchUsername + switchPassword; remote =
        remoteCredentialStoreKey + remoteCredentialStoreType.
        At least one complete pair is required.
        """
        has_local = self.switch_username is not None and self.switch_password is not None
        has_remote = self.remote_credential_store_key is not None and self.remote_credential_store_type is not None
        if not has_local and not has_remote:
            raise ValueError(
                "Either local credentials (switchUsername + switchPassword) "
                "or remote credentials (remoteCredentialStoreKey + remoteCredentialStoreType) must be provided"
            )
        return self
ChangeSwitchSerialNumberRequestModel(NDBaseModel): + """ + Request body to update the serial number of an existing fabric switch. + + Path: POST /fabrics/{fabricName}/switches/{switchId}/actions/changeSwitchSerialNumber + """ + identifiers: ClassVar[List[str]] = ["new_switch_id"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + new_switch_id: str = Field( + ..., + alias="newSwitchId", + description="New switchId" + ) + + @field_validator('new_switch_id', mode='before') + @classmethod + def validate_serial(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("new_switch_id cannot be empty") + return result + + +__all__ = [ + "SwitchCredentialsRequestModel", + "ChangeSwitchSerialNumberRequestModel", +] diff --git a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py new file mode 100644 index 00000000..5afc6117 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py @@ -0,0 +1,488 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Switch inventory data models (API response representations). + +Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from pydantic import Field, field_validator +from typing import Any, Dict, List, Optional, ClassVar, Literal, Union +from typing_extensions import Self + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel + +from .enums import ( + AdvisoryLevel, + AnomalyLevel, + ConfigSyncStatus, + DiscoveryStatus, + PlatformType, + RemoteCredentialStore, + SwitchRole, + SystemMode, + VpcRole, +) +from .validators import SwitchValidators + + +class TelemetryIpCollection(NDNestedModel): + """ + Inband and out-of-band telemetry IP addresses for a switch. + """ + identifiers: ClassVar[List[str]] = [] + inband_ipv4_address: Optional[str] = Field( + default=None, + alias="inbandIpV4Address", + description="Inband IPv4 address" + ) + inband_ipv6_address: Optional[str] = Field( + default=None, + alias="inbandIpV6Address", + description="Inband IPv6 address" + ) + out_of_band_ipv4_address: Optional[str] = Field( + default=None, + alias="outOfBandIpV4Address", + description="Out of band IPv4 address" + ) + out_of_band_ipv6_address: Optional[str] = Field( + default=None, + alias="outOfBandIpV6Address", + description="Out of band IPv6 address" + ) + + @field_validator('inband_ipv4_address', 'out_of_band_ipv4_address', mode='before') + @classmethod + def validate_ipv4(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_ip_address(v) + + +class VpcData(NDNestedModel): + """ + vPC pair configuration and operational status for a switch. 
+ """ + identifiers: ClassVar[List[str]] = [] + vpc_domain: int = Field( + ..., + alias="vpcDomain", + ge=1, + le=1000, + description="vPC domain ID" + ) + peer_switch_id: str = Field( + ..., + alias="peerSwitchId", + description="vPC peer switch serial number" + ) + consistent_status: Optional[bool] = Field( + default=None, + alias="consistentStatus", + description="Flag to indicate the vPC status is consistent" + ) + intended_peer_name: Optional[str] = Field( + default=None, + alias="intendedPeerName", + description="Intended vPC host name for pre-provisioned peer switch" + ) + keep_alive_status: Optional[str] = Field( + default=None, + alias="keepAliveStatus", + description="vPC peer keep alive status" + ) + peer_link_status: Optional[str] = Field( + default=None, + alias="peerLinkStatus", + description="vPC peer link status" + ) + peer_name: Optional[str] = Field( + default=None, + alias="peerName", + description="vPC peer switch name" + ) + vpc_role: Optional[VpcRole] = Field( + default=None, + alias="vpcRole", + description="The vPC role" + ) + + @field_validator('peer_switch_id', mode='before') + @classmethod + def validate_peer_serial(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("peer_switch_id cannot be empty") + return result + + +class SwitchMetadata(NDNestedModel): + """ + Internal database identifiers associated with a switch record. + """ + identifiers: ClassVar[List[str]] = [] + switch_db_id: Optional[int] = Field( + default=None, + alias="switchDbId", + description="Database Id of the switch" + ) + switch_uuid: Optional[str] = Field( + default=None, + alias="switchUuid", + description="Internal unique Id of the switch" + ) + + +class AdditionalSwitchData(NDNestedModel): + """ + Platform-specific additional data for NX-OS switches. 
+ """ + identifiers: ClassVar[List[str]] = [] + usage: Optional[str] = Field( + default="others", + description="The usage of additional data" + ) + config_sync_status: Optional[ConfigSyncStatus] = Field( + default=None, + alias="configSyncStatus", + description="Configuration sync status" + ) + discovery_status: Optional[DiscoveryStatus] = Field( + default=None, + alias="discoveryStatus", + description="Discovery status" + ) + domain_name: Optional[str] = Field( + default=None, + alias="domainName", + description="Domain name" + ) + smart_switch: Optional[bool] = Field( + default=None, + alias="smartSwitch", + description="Flag that indicates if the switch is equipped with DPUs or not" + ) + hypershield_connectivity_status: Optional[str] = Field( + default=None, + alias="hypershieldConnectivityStatus", + description="Smart switch connectivity status to hypershield controller" + ) + hypershield_tenant: Optional[str] = Field( + default=None, + alias="hypershieldTenant", + description="Hypershield tenant name" + ) + hypershield_integration_name: Optional[str] = Field( + default=None, + alias="hypershieldIntegrationName", + description="Hypershield Integration Id" + ) + source_interface_name: Optional[str] = Field( + default=None, + alias="sourceInterfaceName", + description="Source interface for switch discovery" + ) + source_vrf_name: Optional[str] = Field( + default=None, + alias="sourceVrfName", + description="Source VRF for switch discovery" + ) + platform_type: Optional[PlatformType] = Field( + default=None, + alias="platformType", + description="Platform type of the switch" + ) + discovered_system_mode: Optional[SystemMode] = Field( + default=None, + alias="discoveredSystemMode", + description="Discovered system mode" + ) + intended_system_mode: Optional[SystemMode] = Field( + default=None, + alias="intendedSystemMode", + description="Intended system mode" + ) + scalable_unit: Optional[str] = Field( + default=None, + alias="scalableUnit", + description="Name of 
the scalable unit" + ) + system_mode: Optional[SystemMode] = Field( + default=None, + alias="systemMode", + description="System mode" + ) + vendor: Optional[str] = Field( + default=None, + description="Vendor of the switch" + ) + username: Optional[str] = Field( + default=None, + description="Discovery user name" + ) + remote_credential_store: Optional[RemoteCredentialStore] = Field( + default=None, + alias="remoteCredentialStore" + ) + meta: Optional[SwitchMetadata] = Field( + default=None, + description="Switch metadata" + ) + + +class AdditionalAciSwitchData(NDNestedModel): + """ + Platform-specific additional data for ACI leaf and spine switches. + """ + identifiers: ClassVar[List[str]] = [] + usage: Optional[str] = Field( + default="aci", + description="The usage of additional data" + ) + admin_status: Optional[Literal["inService", "outOfService"]] = Field( + default=None, + alias="adminStatus", + description="Admin status" + ) + health_score: Optional[int] = Field( + default=None, + alias="healthScore", + ge=1, + le=100, + description="Switch health score" + ) + last_reload_time: Optional[str] = Field( + default=None, + alias="lastReloadTime", + description="Timestamp when the system is last reloaded" + ) + last_software_update_time: Optional[str] = Field( + default=None, + alias="lastSoftwareUpdateTime", + description="Timestamp when the software is last updated" + ) + node_id: Optional[int] = Field( + default=None, + alias="nodeId", + ge=1, + description="Node ID" + ) + node_status: Optional[Literal["active", "inActive"]] = Field( + default=None, + alias="nodeStatus", + description="Node status" + ) + pod_id: Optional[int] = Field( + default=None, + alias="podId", + ge=1, + description="Pod ID" + ) + remote_leaf_group_name: Optional[str] = Field( + default=None, + alias="remoteLeafGroupName", + description="Remote leaf group name" + ) + switch_added: Optional[str] = Field( + default=None, + alias="switchAdded", + description="Timestamp when the switch is 
added" + ) + tep_pool: Optional[str] = Field( + default=None, + alias="tepPool", + description="TEP IP pool" + ) + + +class Metadata(NDNestedModel): + """ + Pagination and result-count metadata from a list API response. + """ + identifiers: ClassVar[List[str]] = [] + + counts: Optional[Dict[str, int]] = Field( + default=None, + description="Count information including total and remaining" + ) + + +class SwitchDataModel(NDBaseModel): + """ + Inventory record for a single switch as returned by the fabric switches API. + + Path: GET /fabrics/{fabricName}/switches + """ + identifiers: ClassVar[List[str]] = ["switch_id"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + switch_id: str = Field( + ..., + alias="switchId", + description="Serial number of Switch or Node Id of ACI switch" + ) + serial_number: Optional[str] = Field( + default=None, + alias="serialNumber", + description="Serial number of switch or APIC controller node" + ) + additional_data: Optional[Union[AdditionalSwitchData, AdditionalAciSwitchData]] = Field( + default=None, + alias="additionalData", + description="Additional switch data" + ) + advisory_level: Optional[AdvisoryLevel] = Field( + default=None, + alias="advisoryLevel" + ) + anomaly_level: Optional[AnomalyLevel] = Field( + default=None, + alias="anomalyLevel" + ) + alert_suspend: Optional[str] = Field( + default=None, + alias="alertSuspend" + ) + fabric_management_ip: Optional[str] = Field( + default=None, + alias="fabricManagementIp", + description="Switch IPv4/v6 address used for management" + ) + fabric_name: Optional[str] = Field( + default=None, + alias="fabricName", + description="Fabric name", + max_length=64 + ) + fabric_type: Optional[str] = Field( + default=None, + alias="fabricType", + description="Fabric type" + ) + hostname: Optional[str] = Field( + default=None, + description="Switch host name" + ) + model: Optional[str] = Field( + default=None, + description="Model 
of switch or APIC controller node" + ) + software_version: Optional[str] = Field( + default=None, + alias="softwareVersion", + description="Software version of switch or APIC controller node" + ) + switch_role: Optional[SwitchRole] = Field( + default=None, + alias="switchRole" + ) + mode: Optional[str] = Field( + default=None, + description="Switch mode (Normal, Migration, etc.)" + ) + system_up_time: Optional[str] = Field( + default=None, + alias="systemUpTime", + description="System up time" + ) + vpc_configured: Optional[bool] = Field( + default=None, + alias="vpcConfigured", + description="Flag to indicate switch is part of a vPC domain" + ) + vpc_data: Optional[VpcData] = Field( + default=None, + alias="vpcData" + ) + telemetry_ip_collection: Optional[TelemetryIpCollection] = Field( + default=None, + alias="telemetryIpCollection" + ) + + @field_validator('additional_data', mode='before') + @classmethod + def parse_additional_data(cls, v: Any) -> Any: + """Route additionalData to the correct nested model. + + The NDFC API may omit the ``usage`` field for non-ACI switches. + Default to ``"others"`` so Pydantic selects ``AdditionalSwitchData`` + and coerces ``discoveryStatus`` / ``systemMode`` as proper enums. 
+ """ + if v is None or not isinstance(v, dict): + return v + if 'usage' not in v: + v = {**v, 'usage': 'others'} + return v + + @field_validator('switch_id', mode='before') + @classmethod + def validate_switch_id(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("switch_id cannot be empty") + return result + + @field_validator('fabric_management_ip', mode='before') + @classmethod + def validate_mgmt_ip(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_ip_address(v) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + return self.model_dump(by_alias=True, exclude_none=True) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> Self: + """ + Create model instance from API response. + + Handles two response formats: + 1. Inventory API format: {switchId, fabricManagementIp, switchRole, ...} + 2. Discovery API format: {serialNumber, ip, hostname, model, softwareVersion, status, ...} + + Args: + response: Response dict from either inventory or discovery API + + Returns: + SwitchDataModel instance + """ + # Detect format and transform if needed + if "switchId" in response or "fabricManagementIp" in response: + # Already in inventory format - use as-is + return cls.model_validate(response) + + # Discovery format - transform to inventory format + transformed = { + "switchId": response.get("serialNumber"), + "serialNumber": response.get("serialNumber"), + "fabricManagementIp": response.get("ip"), + "hostname": response.get("hostname"), + "model": response.get("model"), + "softwareVersion": response.get("softwareVersion"), + "mode": response.get("mode", "Normal"), + } + + # Only add switchRole if present in response (avoid overwriting with None) + if "switchRole" in response: + transformed["switchRole"] = response["switchRole"] + elif "role" in response: + transformed["switchRole"] = response["role"] + + return 
cls.model_validate(transformed) + + +__all__ = [ + "TelemetryIpCollection", + "VpcData", + "SwitchMetadata", + "AdditionalSwitchData", + "AdditionalAciSwitchData", + "Metadata", + "SwitchDataModel", +] diff --git a/plugins/module_utils/models/nd_manage_switches/validators.py b/plugins/module_utils/models/nd_manage_switches/validators.py new file mode 100644 index 00000000..b2e3a704 --- /dev/null +++ b/plugins/module_utils/models/nd_manage_switches/validators.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Common validators for switch-related fields.""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re +from ipaddress import ip_address, ip_network +from typing import Optional + + +class SwitchValidators: + """ + Common validators for switch-related fields. + """ + + @staticmethod + def validate_ip_address(v: Optional[str]) -> Optional[str]: + """Validate IPv4 or IPv6 address.""" + if v is None: + return None + v = str(v).strip() + if not v: + return None + try: + ip_address(v) + return v + except ValueError: + raise ValueError(f"Invalid IP address format: {v}") + + @staticmethod + def validate_cidr(v: Optional[str]) -> Optional[str]: + """Validate CIDR notation (IP/mask).""" + if v is None: + return None + v = str(v).strip() + if not v: + return None + if '/' not in v: + raise ValueError(f"CIDR notation required (IP/mask format): {v}") + try: + ip_network(v, strict=False) + return v + except ValueError: + raise ValueError(f"Invalid CIDR format: {v}") + + @staticmethod + def validate_serial_number(v: Optional[str]) -> Optional[str]: + """Validate switch serial number format.""" + if v is None: + return None + v = str(v).strip() + if not v: + return None + # Serial numbers are typically alphanumeric with optional hyphens + if not re.match(r'^[A-Za-z0-9_-]+$', v): 
+ raise ValueError( + f"Serial number must be alphanumeric with optional hyphens/underscores: {v}" + ) + return v + + @staticmethod + def validate_hostname(v: Optional[str]) -> Optional[str]: + """Validate hostname format.""" + if v is None: + return None + v = str(v).strip() + if not v: + return None + # RFC 1123 hostname validation + if len(v) > 255: + raise ValueError("Hostname cannot exceed 255 characters") + # Allow alphanumeric, dots, hyphens, underscores + if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9._-]*$', v): + raise ValueError( + f"Invalid hostname format. Must start with alphanumeric and " + f"contain only alphanumeric, dots, hyphens, underscores: {v}" + ) + if v.startswith('.') or v.endswith('.') or '..' in v: + raise ValueError(f"Invalid hostname format (dots): {v}") + return v + + @staticmethod + def validate_mac_address(v: Optional[str]) -> Optional[str]: + """Validate MAC address format.""" + if v is None: + return None + v = str(v).strip() + if not v: + return None + # Accept colon or hyphen separated MAC addresses + mac_pattern = r'^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$' + if not re.match(mac_pattern, v): + raise ValueError(f"Invalid MAC address format: {v}") + return v + + @staticmethod + def validate_vpc_domain(v: Optional[int]) -> Optional[int]: + """Validate VPC domain ID (1-1000).""" + if v is None: + return None + if not 1 <= v <= 1000: + raise ValueError(f"VPC domain must be between 1 and 1000: {v}") + return v + + +__all__ = [ + "SwitchValidators", +] diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py new file mode 100644 index 00000000..3d9a7f69 --- /dev/null +++ b/plugins/module_utils/nd_switch_resources.py @@ -0,0 +1,2611 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Manage ND fabric switch lifecycle workflows. 
+ +This module validates desired switch state, performs discovery and fabric +operations, and coordinates POAP and RMA workflows. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +from copy import deepcopy +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import ValidationError + +from .nd_v2 import NDModule +from .enums import OperationType +from .rest.results import Results +from .models.nd_manage_switches import ( + SwitchRole, + SnmpV3AuthProtocol, + PlatformType, + DiscoveryStatus, + SystemMode, + SwitchDiscoveryModel, + SwitchDataModel, + AddSwitchesRequestModel, + ShallowDiscoveryRequestModel, + BootstrapImportSwitchModel, + ImportBootstrapSwitchesRequestModel, + PreProvisionSwitchModel, + PreProvisionSwitchesRequestModel, + RMASwitchModel, + SwitchConfigModel, + SwitchCredentialsRequestModel, + ChangeSwitchSerialNumberRequestModel, + POAPConfigModel, + RMAConfigModel, +) +from .utils.nd_manage_switches import ( + FabricUtils, + SwitchWaitUtils, + SwitchOperationError, + mask_password, + get_switch_field, + group_switches_by_credentials, + query_bootstrap_switches, + build_bootstrap_index, + build_poap_data_block, +) +from .endpoints.v1.nd_manage_switches.manage_fabric_switches import ( + V1ManageFabricSwitchesGet, + V1ManageFabricSwitchesPost, +) +from .endpoints.v1.nd_manage_switches.manage_fabric_discovery import V1ManageFabricShallowDiscoveryPost +from .endpoints.v1.nd_manage_switches.manage_fabric_switch_actions import ( + V1ManageFabricSwitchProvisionRMAPost, + V1ManageFabricSwitchActionsImportBootstrapPost, + V1ManageFabricSwitchActionsPreProvisionPost, + V1ManageFabricSwitchActionsRemovePost, + V1ManageFabricSwitchActionsChangeRolesPost, + V1ManageFabricSwitchChangeSerialNumberPost, +) +from .endpoints.v1.nd_manage_switches.manage_credentials import V1ManageCredentialsSwitchesPost + + +# 
========================================================================= +# Shared Dependency Container +# ========================================================================= + +@dataclass +class SwitchServiceContext: + """Store shared dependencies used by service classes. + + Attributes: + nd: ND module wrapper for requests and module interactions. + results: Shared results aggregator for task output. + fabric: Target fabric name. + log: Logger instance. + save_config: Whether to run fabric save after changes. + deploy_config: Whether to run fabric deploy after changes. + """ + nd: NDModule + results: Results + fabric: str + log: logging.Logger + save_config: bool = True + deploy_config: bool = True + + +# ========================================================================= +# Validation & Diff +# ========================================================================= + +class SwitchDiffEngine: + """Provide stateless validation and diff computation helpers.""" + + @staticmethod + def validate_configs( + config: Union[Dict[str, Any], List[Dict[str, Any]]], + state: str, + nd: NDModule, + log: logging.Logger, + ) -> List[SwitchConfigModel]: + """Validate raw module config and return typed switch configs. + + Args: + config: Raw config dict or list of dicts from module parameters. + state: Requested module state. + nd: ND module wrapper used for failure handling. + log: Logger instance. + + Returns: + List of validated ``SwitchConfigModel`` objects. + + Raises: + ValidationError: Raised by model validation for invalid input. 
+ """ + log.debug("ENTER: validate_configs()") + + configs_list = config if isinstance(config, list) else [config] + log.debug(f"Normalized to {len(configs_list)} configuration(s)") + + validated_configs: List[SwitchConfigModel] = [] + for idx, cfg in enumerate(configs_list): + try: + validated = SwitchConfigModel.model_validate( + cfg, context={"state": state} + ) + validated_configs.append(validated) + except ValidationError as e: + error_detail = e.errors() if hasattr(e, 'errors') else str(e) + error_msg = ( + f"Configuration validation failed for " + f"config index {idx}: {error_detail}" + ) + log.error(error_msg) + if hasattr(nd, 'module'): + nd.module.fail_json(msg=error_msg) + else: + raise ValueError(error_msg) from e + except Exception as e: + error_msg = ( + f"Configuration validation failed for " + f"config index {idx}: {str(e)}" + ) + log.error(error_msg) + if hasattr(nd, 'module'): + nd.module.fail_json(msg=error_msg) + else: + raise ValueError(error_msg) from e + + if not validated_configs: + log.warning("No valid configurations found in input") + return validated_configs + + # Cross-config check — model can't do this per-instance + try: + SwitchConfigModel.validate_no_mixed_operations(validated_configs) + except ValueError as e: + error_msg = str(e) + log.error(error_msg) + if hasattr(nd, 'module'): + nd.module.fail_json(msg=error_msg) + else: + raise + + operation_type = validated_configs[0].operation_type + log.info( + f"Successfully validated {len(validated_configs)} " + f"configuration(s) with operation type: {operation_type}" + ) + log.debug( + f"EXIT: validate_configs() -> " + f"{len(validated_configs)} configs, operation_type={operation_type}" + ) + return validated_configs + + @staticmethod + def compute_changes( + proposed: List[SwitchDataModel], + existing: List[SwitchDataModel], + log: logging.Logger, + ) -> Dict[str, List[SwitchDataModel]]: + """Compare proposed and existing switches and categorize changes. 
+ + Args: + proposed: Switch models representing desired state. + existing: Switch models currently present in inventory. + log: Logger instance. + + Returns: + Dict mapping change buckets to switch lists. Buckets are + ``to_add``, ``to_update``, ``to_delete``, ``migration_mode``, + and ``idempotent``. + """ + log.debug("ENTER: compute_changes()") + log.debug( + f"Comparing {len(proposed)} proposed vs {len(existing)} existing switches" + ) + + # Build indexes for O(1) lookups + existing_by_id = {sw.switch_id: sw for sw in existing} + existing_by_ip = {sw.fabric_management_ip: sw for sw in existing} + + log.debug( + f"Indexes built — existing_by_id: {list(existing_by_id.keys())}, " + f"existing_by_ip: {list(existing_by_ip.keys())}" + ) + + # Only user-controllable fields populated by both discovery and + # inventory APIs. Server-managed fields (uptime, alerts, vpc info, + # telemetry, etc.) are ignored. + compare_fields = { + "switch_id", + "serial_number", + "fabric_management_ip", + "hostname", + "model", + "software_version", + "switch_role", + } + + changes: Dict[str, list] = { + "to_add": [], + "to_update": [], + "to_delete": [], + "migration_mode": [], + "idempotent": [], + } + + # Categorise proposed switches + for prop_sw in proposed: + ip = prop_sw.fabric_management_ip + sid = prop_sw.switch_id + + existing_sw = existing_by_id.get(sid) + match_key = "switch_id" if existing_sw else None + + if not existing_sw: + existing_sw = existing_by_ip.get(ip) + if existing_sw: + match_key = "ip" + + if not existing_sw: + log.info( + f"Switch {ip} (id={sid}) not found in existing — marking to_add" + ) + changes["to_add"].append(prop_sw) + continue + + log.debug( + f"Switch {ip} matched existing by {match_key} " + f"(existing_id={existing_sw.switch_id})" + ) + + if existing_sw.mode == "Migration": + log.info( + f"Switch {ip} ({existing_sw.switch_id}) is in Migration mode" + ) + changes["migration_mode"].append(prop_sw) + continue + + prop_dict = prop_sw.model_dump( + 
by_alias=True, exclude_none=True, include=compare_fields + ) + existing_dict = existing_sw.model_dump( + by_alias=True, exclude_none=True, include=compare_fields + ) + + if prop_dict == existing_dict: + log.debug(f"Switch {ip} is idempotent — no changes needed") + changes["idempotent"].append(prop_sw) + else: + diff_keys = { + k for k in set(prop_dict) | set(existing_dict) + if prop_dict.get(k) != existing_dict.get(k) + } + log.info( + f"Switch {ip} has differences — marking to_update. " + f"Changed fields: {diff_keys}" + ) + log.debug( + f"Switch {ip} diff detail — " + f"proposed: { {k: prop_dict.get(k) for k in diff_keys} }, " + f"existing: { {k: existing_dict.get(k) for k in diff_keys} }" + ) + changes["to_update"].append(prop_sw) + + # Switches in existing but not in proposed (for overridden state) + proposed_ids = {sw.switch_id for sw in proposed} + for existing_sw in existing: + if existing_sw.switch_id not in proposed_ids: + log.info( + f"Existing switch {existing_sw.fabric_management_ip} " + f"({existing_sw.switch_id}) not in proposed — marking to_delete" + ) + changes["to_delete"].append(existing_sw) + + log.info( + f"Compute changes summary: " + f"to_add={len(changes['to_add'])}, " + f"to_update={len(changes['to_update'])}, " + f"to_delete={len(changes['to_delete'])}, " + f"migration_mode={len(changes['migration_mode'])}, " + f"idempotent={len(changes['idempotent'])}" + ) + log.debug("EXIT: compute_changes()") + return changes + + +# ========================================================================= +# Switch Discovery Service +# ========================================================================= + +class SwitchDiscoveryService: + """Handle switch discovery and proposed-model construction.""" + + def __init__(self, ctx: SwitchServiceContext): + """Initialize the discovery service. + + Args: + ctx: Shared service context. + + Returns: + None. 
+ """ + self.ctx = ctx + + def discover( + self, + switch_configs: List[SwitchConfigModel], + ) -> Dict[str, Dict[str, Any]]: + """Discover switches for the provided config list. + + Args: + switch_configs: Validated switch configuration entries. + + Returns: + Dict mapping seed IP to raw discovery data. + """ + log = self.ctx.log + log.debug("Step 1: Grouping switches by credentials") + credential_groups = group_switches_by_credentials(switch_configs, log) + log.debug(f"Created {len(credential_groups)} credential group(s)") + + log.debug("Step 2: Bulk discovering switches") + all_discovered: Dict[str, Dict[str, Any]] = {} + for group_key, switches in credential_groups.items(): + username, _, auth_proto, platform_type, _ = group_key + password = switches[0].password + + log.debug( + f"Discovering group: {len(switches)} switches with username={username}" + ) + try: + discovered_batch = self.bulk_discover( + switches=switches, + username=username, + password=password, + auth_proto=auth_proto, + platform_type=platform_type, + ) + all_discovered.update(discovered_batch) + except Exception as e: + seed_ips = [sw.seed_ip for sw in switches] + msg = ( + f"Discovery failed for credential group " + f"(username={username}, IPs={seed_ips}): {e}" + ) + log.error(msg) + self.ctx.nd.module.fail_json(msg=msg) + + log.debug(f"Total discovered: {len(all_discovered)} switches") + return all_discovered + + def bulk_discover( + self, + switches: List[SwitchConfigModel], + username: str, + password: str, + auth_proto: SnmpV3AuthProtocol, + platform_type: PlatformType, + ) -> Dict[str, Dict[str, Any]]: + """Run one bulk discovery call for switches with shared credentials. + + Args: + switches: Switches to discover. + username: Discovery username. + password: Discovery password. + auth_proto: SNMP v3 authentication protocol. + platform_type: Platform type for discovery. + + Returns: + Dict mapping seed IP to discovered switch data. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_discover()") + log.debug(f"Discovering {len(switches)} switches in bulk") + + endpoint = V1ManageFabricShallowDiscoveryPost() + endpoint.fabric_name = self.ctx.fabric + + seed_ips = [switch.seed_ip for switch in switches] + log.debug(f"Seed IPs: {seed_ips}") + + max_hops = switches[0].max_hops if hasattr(switches[0], 'max_hops') else 0 + + discovery_request = ShallowDiscoveryRequestModel( + seedIpCollection=seed_ips, + maxHop=max_hops, + platformType=platform_type, + snmpV3AuthProtocol=auth_proto, + username=username, + password=password, + ) + + payload = discovery_request.to_payload() + log.info(f"Bulk discovering {len(seed_ips)} switches: {', '.join(seed_ips)}") + log.debug(f"Discovery endpoint: {endpoint.path}") + log.debug(f"Discovery payload (password masked): {mask_password(payload)}") + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "discover" + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_task_result() + + # Extract discovered switches from response + switches_data = [] + if response and isinstance(response, dict): + if "DATA" in response and isinstance(response["DATA"], dict): + switches_data = response["DATA"].get("switches", []) + elif "body" in response and isinstance(response["body"], dict): + switches_data = response["body"].get("switches", []) + elif "switches" in response: + switches_data = response.get("switches", []) + + log.debug( + f"Extracted {len(switches_data)} switches from discovery response" + ) + + discovered_results: Dict[str, Dict[str, Any]] = {} + for discovered in switches_data: + if not isinstance(discovered, dict): + continue + + ip = discovered.get("ip") + status = discovered.get("status", "").lower() + serial_number = 
discovered.get("serialNumber") + + if not serial_number: + msg = ( + f"Switch {ip} discovery response missing serial number. " + f"Cannot proceed without a valid serial number." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + if not ip: + msg = ( + f"Switch with serial {serial_number} discovery response " + f"missing IP address. Cannot proceed without a valid IP." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + if status in ("manageable", "ok"): + discovered_results[ip] = discovered + log.info( + f"Switch {ip} ({serial_number}) discovered successfully - status: {status}" + ) + elif status == "alreadymanaged": + log.info(f"Switch {ip} ({serial_number}) is already managed") + discovered_results[ip] = discovered + else: + reason = discovered.get("statusReason", "Unknown") + log.error( + f"Switch {ip} discovery failed - status: {status}, reason: {reason}" + ) + + for seed_ip in seed_ips: + if seed_ip not in discovered_results: + log.warning(f"Switch {seed_ip} not found in discovery response") + + log.info( + f"Bulk discovery completed: " + f"{len(discovered_results)}/{len(seed_ips)} switches successful" + ) + log.debug(f"Discovered switches: {list(discovered_results.keys())}") + log.debug( + f"EXIT: bulk_discover() -> {len(discovered_results)} discovered" + ) + return discovered_results + + except Exception as e: + msg = ( + f"Bulk discovery failed for switches " + f"{', '.join(seed_ips)}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + def build_proposed( + self, + proposed_config: List[SwitchConfigModel], + discovered_data: Dict[str, Dict[str, Any]], + existing: List[SwitchDataModel], + ) -> List[SwitchDataModel]: + """Build proposed switch models from discovery and inventory data. + + Args: + proposed_config: Validated switch config entries. + discovered_data: Mapping of seed IP to raw discovery data. + existing: Current fabric inventory snapshot. + + Returns: + List of ``SwitchDataModel`` instances for proposed state. 
+ """ + log = self.ctx.log + proposed: List[SwitchDataModel] = [] + + for cfg in proposed_config: + seed_ip = cfg.seed_ip + discovered = discovered_data.get(seed_ip) + + if discovered: + if cfg.role is not None: + discovered["role"] = cfg.role + proposed.append( + SwitchDataModel.from_response(discovered) + ) + log.debug(f"Built proposed model from discovery for {seed_ip}") + continue + + # Fallback: switch may already be in the fabric inventory + existing_match = next( + (sw for sw in existing if sw.fabric_management_ip == seed_ip), + None, + ) + if existing_match: + proposed.append(existing_match) + log.warning( + f"Switch {seed_ip} not discovered but found in existing " + f"inventory — using existing record for comparison" + ) + continue + + msg = ( + f"Switch with seed IP {seed_ip} not discovered " + f"and not found in existing inventory." + ) + log.error(msg) + self.ctx.nd.module.fail_json(msg=msg) + + return proposed + + +# ========================================================================= +# Bulk Fabric Operations +# ========================================================================= + +class SwitchFabricOps: + """Run fabric mutation operations for add, delete, credentials, and roles.""" + + def __init__(self, ctx: SwitchServiceContext, fabric_utils: FabricUtils): + """Initialize the fabric operation service. + + Args: + ctx: Shared service context. + fabric_utils: Utility wrapper for fabric-level operations. + + Returns: + None. + """ + self.ctx = ctx + self.fabric_utils = fabric_utils + + def bulk_add( + self, + switches: List[Tuple[SwitchConfigModel, Dict[str, Any]]], + username: str, + password: str, + auth_proto: SnmpV3AuthProtocol, + platform_type: PlatformType, + preserve_config: bool, + ) -> Dict[str, Any]: + """Add multiple discovered switches to the fabric. + + Args: + switches: List of ``(SwitchConfigModel, discovered_data)`` tuples. + username: Discovery username. + password: Discovery password. 
+ auth_proto: SNMP v3 authentication protocol. + platform_type: Platform type. + preserve_config: Whether to preserve existing switch config. + + Returns: + API response payload. + """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_add()") + log.debug(f"Adding {len(switches)} switches to fabric") + + endpoint = V1ManageFabricSwitchesPost() + endpoint.fabric_name = self.ctx.fabric + + switch_discoveries = [] + for switch_config, discovered in switches: + required_fields = ["hostname", "ip", "serialNumber", "model"] + missing_fields = [f for f in required_fields if not discovered.get(f)] + + if missing_fields: + msg = ( + f"Switch missing required fields from discovery: " + f"{', '.join(missing_fields)}. Cannot add to fabric." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + switch_role = switch_config.role if hasattr(switch_config, 'role') else None + + switch_discovery = SwitchDiscoveryModel( + hostname=discovered.get("hostname"), + ip=discovered.get("ip"), + serialNumber=discovered.get("serialNumber"), + model=discovered.get("model"), + softwareVersion=discovered.get("softwareVersion"), + switchRole=switch_role, + ) + switch_discoveries.append(switch_discovery) + log.debug( + f"Prepared switch for add: " + f"{discovered.get('serialNumber')} ({discovered.get('hostname')})" + ) + + if not switch_discoveries: + log.error("No valid switches to add after validation") + raise SwitchOperationError("No valid switches to add - all failed validation") + + add_request = AddSwitchesRequestModel( + switches=switch_discoveries, + platformType=platform_type, + preserveConfig=preserve_config, + snmpV3AuthProtocol=auth_proto, + username=username, + password=password, + ) + + payload = add_request.to_payload() + serial_numbers = [d.get("serialNumber") for _, d in switches] + log.info( + f"Bulk adding {len(switches)} switches to fabric " + f"{self.ctx.fabric}: {', '.join(serial_numbers)}" + ) + log.debug(f"Add endpoint: 
{endpoint.path}") + log.debug(f"Add payload (password masked): {mask_password(payload)}") + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = ( + f"Bulk add switches to fabric '{self.ctx.fabric}' failed " + f"for {', '.join(serial_numbers)}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "create" + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_task_result() + + if not result.get("success"): + msg = ( + f"Bulk add switches failed for " + f"{', '.join(serial_numbers)}: {response}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + return response + + def bulk_delete( + self, + switches: List[Union[SwitchDataModel, SwitchDiscoveryModel]], + ) -> List[str]: + """Remove multiple switches from the fabric. + + Args: + switches: Switch models to delete. + + Returns: + List of switch identifiers submitted for deletion. + + Raises: + SwitchOperationError: Raised when the delete API call fails. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_delete()") + + if nd.module.check_mode: + log.debug("Check mode: Skipping actual deletion") + return [] + + serial_numbers: List[str] = [] + for switch in switches: + sn = None + if hasattr(switch, 'switch_id'): + sn = switch.switch_id + elif hasattr(switch, 'serial_number'): + sn = switch.serial_number + + if sn: + serial_numbers.append(sn) + else: + ip = getattr(switch, 'fabric_management_ip', None) or getattr(switch, 'ip', None) + log.warning(f"Cannot delete switch {ip}: no serial number/switch_id") + + if not serial_numbers: + log.warning("No valid serial numbers found for deletion") + log.debug("EXIT: bulk_delete() - nothing to delete") + return [] + + endpoint = V1ManageFabricSwitchActionsRemovePost() + endpoint.fabric_name = self.ctx.fabric + payload = {"switchIds": serial_numbers} + + log.info( + f"Bulk removing {len(serial_numbers)} switch(es) from fabric " + f"{self.ctx.fabric}: {serial_numbers}" + ) + log.debug(f"Delete endpoint: {endpoint.path}") + log.debug(f"Delete payload: {payload}") + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "delete" + results.response_current = response + results.result_current = result + results.diff_current = {"deleted": serial_numbers} + results.register_task_result() + + log.info(f"Bulk delete submitted for {len(serial_numbers)} switch(es)") + log.debug("EXIT: bulk_delete()") + return serial_numbers + + except Exception as e: + log.error(f"Bulk delete failed: {e}") + raise SwitchOperationError( + f"Bulk delete failed for {serial_numbers}: {e}" + ) from e + + def bulk_save_credentials( + self, + switch_actions: List[Tuple[str, SwitchConfigModel]], + ) -> None: + """Save switch credentials grouped by username and password. + + Args: + switch_actions: ``(switch_id, SwitchConfigModel)`` pairs. 
+ + Returns: + None. + """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_save_credentials()") + + cred_groups: Dict[Tuple[str, str], List[str]] = {} + for sn, cfg in switch_actions: + if not cfg.user_name or not cfg.password: + log.debug(f"Skipping credentials for {sn}: missing user_name or password") + continue + key = (cfg.user_name, cfg.password) + cred_groups.setdefault(key, []).append(sn) + + if not cred_groups: + log.debug("EXIT: bulk_save_credentials() - no credentials to save") + return + + endpoint = V1ManageCredentialsSwitchesPost() + + for (username, password), serial_numbers in cred_groups.items(): + creds_request = SwitchCredentialsRequestModel( + switchIds=serial_numbers, + switchUsername=username, + switchPassword=password, + ) + payload = creds_request.to_payload() + + log.info( + f"Saving credentials for {len(serial_numbers)} switch(es): {serial_numbers}" + ) + log.debug(f"Credentials endpoint: {endpoint.path}") + log.debug( + f"Credentials payload (masked): {mask_password(payload)}" + ) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "save_credentials" + results.response_current = response + results.result_current = result + results.diff_current = { + "switchIds": serial_numbers, + "username": username, + } + results.register_task_result() + log.info(f"Credentials saved for {len(serial_numbers)} switch(es)") + except Exception as e: + msg = ( + f"Failed to save credentials for " + f"switches {serial_numbers}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + log.debug("EXIT: bulk_save_credentials()") + + def bulk_update_roles( + self, + switch_actions: List[Tuple[str, SwitchConfigModel]], + ) -> None: + """Update switch roles in bulk. + + Args: + switch_actions: ``(switch_id, SwitchConfigModel)`` pairs. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_update_roles()") + + switch_roles = [] + for sn, cfg in switch_actions: + role = get_switch_field(cfg, ['role']) + if not role: + continue + role_value = role.value if isinstance(role, SwitchRole) else str(role) + switch_roles.append({"switchId": sn, "role": role_value}) + + if not switch_roles: + log.debug("EXIT: bulk_update_roles() - no roles to update") + return + + endpoint = V1ManageFabricSwitchActionsChangeRolesPost() + endpoint.fabric_name = self.ctx.fabric + payload = {"switchRoles": switch_roles} + + log.info(f"Bulk updating roles for {len(switch_roles)} switch(es)") + log.debug(f"ChangeRoles endpoint: {endpoint.path}") + log.debug(f"ChangeRoles payload: {payload}") + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "update_role" + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_task_result() + log.info(f"Roles updated for {len(switch_roles)} switch(es)") + except Exception as e: + msg = ( + f"Failed to bulk update roles for switches: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + log.debug("EXIT: bulk_update_roles()") + + def finalize(self) -> None: + """Run optional save and deploy actions for the fabric. + + Uses service context flags to decide whether save and deploy should be + executed. No-op in check mode. + + Returns: + None. 
+ """ + if self.ctx.nd.module.check_mode: + return + + if self.ctx.save_config: + self.ctx.log.info("Saving fabric configuration") + self.fabric_utils.save_config() + + if self.ctx.deploy_config: + self.ctx.log.info("Deploying fabric configuration") + self.fabric_utils.deploy_config() + + def post_add_processing( + self, + switch_actions: List[Tuple[str, SwitchConfigModel]], + wait_utils, + context: str, + all_preserve_config: bool = False, + skip_greenfield_check: bool = False, + update_roles: bool = False, + ) -> None: + """Run post-add tasks for newly processed switches. + + Args: + switch_actions: ``(switch_id, SwitchConfigModel)`` pairs. + wait_utils: Wait utility used for manageability checks. + context: Label used in logs and error messages. + all_preserve_config: Whether to use preserve-config wait behavior. + skip_greenfield_check: Whether to skip greenfield wait shortcut. + update_roles: Whether to apply bulk role updates. + + Returns: + None. + """ + nd = self.ctx.nd + log = self.ctx.log + all_serials = [sn for sn, _ in switch_actions] + + log.info( + f"Waiting for {len(all_serials)} {context} " + f"switch(es) to become manageable: {all_serials}" + ) + + wait_kwargs: Dict[str, Any] = {} + if all_preserve_config: + wait_kwargs["all_preserve_config"] = True + if skip_greenfield_check: + wait_kwargs["skip_greenfield_check"] = True + + success = wait_utils.wait_for_switch_manageable( + all_serials, + **wait_kwargs, + ) + if not success: + msg = ( + f"One or more {context} switches failed to become " + f"manageable in fabric '{self.ctx.fabric}'. 
" + f"Switches: {all_serials}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + self.bulk_save_credentials(switch_actions) + + if update_roles: + self.bulk_update_roles(switch_actions) + + try: + self.finalize() + except Exception as e: + msg = ( + f"Failed to finalize (config-save/deploy) for " + f"{context} switches {all_serials}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + +# ========================================================================= +# POAP Handler (Bootstrap / Pre-Provision) +# ========================================================================= + +class POAPHandler: + """Handle POAP workflows for bootstrap, pre-provision, and serial swap.""" + + def __init__( + self, + ctx: SwitchServiceContext, + fabric_ops: SwitchFabricOps, + wait_utils: SwitchWaitUtils, + ): + """Initialize the POAP workflow handler. + + Args: + ctx: Shared service context. + fabric_ops: Fabric operation service. + wait_utils: Switch wait utility service. + + Returns: + None. + """ + self.ctx = ctx + self.fabric_ops = fabric_ops + self.wait_utils = wait_utils + + def handle( + self, + proposed_config: List[SwitchConfigModel], + existing: Optional[List[SwitchDataModel]] = None, + ) -> None: + """Execute POAP processing for the provided switch configs. + + Args: + proposed_config: Validated switch configs for POAP operations. + existing: Current fabric inventory snapshot. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: POAPHandler.handle()") + log.info(f"Processing POAP for {len(proposed_config)} switch config(s)") + + # Check mode — preview only + if nd.module.check_mode: + log.info("Check mode: would run POAP bootstrap / pre-provision") + results.action = "poap" + results.response_current = {"MESSAGE": "check mode — skipped"} + results.result_current = {"success": True, "changed": True} + results.diff_current = { + "poap_switches": [pc.seed_ip for pc in proposed_config] + } + results.register_task_result() + return + + # Classify entries + bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] + preprov_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] + swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] + + for switch_cfg in proposed_config: + if not switch_cfg.poap: + log.warning( + f"Switch config for {switch_cfg.seed_ip} has no POAP block — skipping" + ) + continue + + for poap_cfg in switch_cfg.poap: + if poap_cfg.serial_number and poap_cfg.preprovision_serial: + swap_entries.append((switch_cfg, poap_cfg)) + elif poap_cfg.preprovision_serial: + preprov_entries.append((switch_cfg, poap_cfg)) + elif poap_cfg.serial_number: + bootstrap_entries.append((switch_cfg, poap_cfg)) + else: + log.warning( + f"POAP entry for {switch_cfg.seed_ip} has neither " + f"serial_number nor preprovision_serial — skipping" + ) + + log.info( + f"POAP classification: {len(bootstrap_entries)} bootstrap, " + f"{len(preprov_entries)} pre-provision, " + f"{len(swap_entries)} swap" + ) + + # Handle swap entries (change serial number on pre-provisioned switches) + if swap_entries: + self._handle_poap_swap(swap_entries, existing or []) + + # Handle bootstrap entries + if bootstrap_entries: + self._handle_poap_bootstrap(bootstrap_entries) + + # Handle pre-provision entries + if preprov_entries: + preprov_models: List[PreProvisionSwitchModel] = [] + for switch_cfg, 
poap_cfg in preprov_entries: + pp_model = self._build_preprovision_model(switch_cfg, poap_cfg) + preprov_models.append(pp_model) + log.info( + f"Built pre-provision model for serial=" + f"{pp_model.serial_number}, hostname={pp_model.hostname}, " + f"ip={pp_model.ip}" + ) + + if preprov_models: + self._preprovision_switches(preprov_models) + + # Edge case: nothing actionable + if not bootstrap_entries and not preprov_entries and not swap_entries: + log.warning("No POAP switch models built — nothing to process") + results.action = "poap" + results.response_current = {"MESSAGE": "no switches to process"} + results.result_current = {"success": True, "changed": False} + results.diff_current = {} + results.register_task_result() + + log.debug("EXIT: POAPHandler.handle()") + + def _handle_poap_bootstrap( + self, + bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]], + ) -> None: + """Process bootstrap POAP entries. + + Args: + bootstrap_entries: ``(SwitchConfigModel, POAPConfigModel)`` pairs + for bootstrap operations. + + Returns: + None. + """ + nd = self.ctx.nd + log = self.ctx.log + + log.debug("ENTER: _handle_poap_bootstrap()") + log.info(f"Processing {len(bootstrap_entries)} bootstrap entries") + + bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) + bootstrap_idx = build_bootstrap_index(bootstrap_switches) + log.debug( + f"Bootstrap index contains {len(bootstrap_idx)} switch(es): " + f"{list(bootstrap_idx.keys())}" + ) + + import_models: List[BootstrapImportSwitchModel] = [] + for switch_cfg, poap_cfg in bootstrap_entries: + serial = poap_cfg.serial_number + bootstrap_data = bootstrap_idx.get(serial) + + if not bootstrap_data: + msg = ( + f"Serial {serial} not found in bootstrap API " + f"response. The switch is not in the POAP loop. " + f"Ensure the switch is powered on and POAP/DHCP " + f"is enabled in the fabric." 
+ ) + log.error(msg) + nd.module.fail_json(msg=msg) + + model = self._build_bootstrap_import_model( + switch_cfg, poap_cfg, bootstrap_data + ) + import_models.append(model) + log.info( + f"Built bootstrap model for serial={serial}, " + f"hostname={model.hostname}, ip={model.ip}" + ) + + if not import_models: + log.warning("No bootstrap import models built") + log.debug("EXIT: _handle_poap_bootstrap()") + return + + self._import_bootstrap_switches(import_models) + + # Post-import: wait for manageability, save credentials, finalize + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + for switch_cfg, poap_cfg in bootstrap_entries: + switch_actions.append((poap_cfg.serial_number, switch_cfg)) + + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="bootstrap", + skip_greenfield_check=True, + ) + + log.debug("EXIT: _handle_poap_bootstrap()") + + def _build_bootstrap_import_model( + self, + switch_cfg: SwitchConfigModel, + poap_cfg: POAPConfigModel, + bootstrap_data: Optional[Dict[str, Any]], + ) -> BootstrapImportSwitchModel: + """Build a bootstrap import model from config and bootstrap data. + + Args: + switch_cfg: Parent switch config. + poap_cfg: POAP config entry. + bootstrap_data: Matching bootstrap response entry. + + Returns: + Completed ``BootstrapImportSwitchModel`` for API submission. 
+ """ + log = self.ctx.log + log.debug( + f"ENTER: _build_bootstrap_import_model(serial={poap_cfg.serial_number})" + ) + + bs = bootstrap_data or {} + + # User config fields + serial_number = poap_cfg.serial_number + hostname = poap_cfg.hostname + ip = switch_cfg.seed_ip + model = poap_cfg.model + version = poap_cfg.version + image_policy = poap_cfg.image_policy + gateway_ip_mask = poap_cfg.config_data.gateway if poap_cfg.config_data else None + switch_role = switch_cfg.role + password = switch_cfg.password + auth_proto = SnmpV3AuthProtocol.MD5 # POAP/bootstrap always uses MD5 + + discovery_username = getattr(poap_cfg, "discovery_username", None) + discovery_password = getattr(poap_cfg, "discovery_password", None) + + # Bootstrap API response fields + fingerprint = bs.get("fingerPrint", bs.get("fingerprint", "")) + public_key = bs.get("publicKey", "") + re_add = bs.get("reAdd", False) + in_inventory = bs.get("inInventory", False) + + # Shared data block builder + data_block = build_poap_data_block(poap_cfg) + + bootstrap_model = BootstrapImportSwitchModel( + serialNumber=serial_number, + model=model, + version=version, + hostname=hostname, + ipAddress=ip, + password=password, + discoveryAuthProtocol=auth_proto, + discoveryUsername=discovery_username, + discoveryPassword=discovery_password, + data=data_block, + fingerprint=fingerprint, + publicKey=public_key, + reAdd=re_add, + inInventory=in_inventory, + imagePolicy=image_policy or "", + switchRole=switch_role, + ip=ip, + softwareVersion=version, + gatewayIpMask=gateway_ip_mask, + ) + + log.debug( + f"EXIT: _build_bootstrap_import_model() -> {bootstrap_model.serial_number}" + ) + return bootstrap_model + + def _import_bootstrap_switches( + self, + models: List[BootstrapImportSwitchModel], + ) -> None: + """Submit bootstrap import models. + + Args: + models: ``BootstrapImportSwitchModel`` objects to submit. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: _import_bootstrap_switches()") + + endpoint = V1ManageFabricSwitchActionsImportBootstrapPost() + endpoint.fabric_name = self.ctx.fabric + + request_model = ImportBootstrapSwitchesRequestModel(switches=models) + payload = request_model.to_payload() + + log.debug(f"importBootstrap endpoint: {endpoint.path}") + log.debug( + f"importBootstrap payload (masked): {mask_password(payload)}" + ) + log.info( + f"Importing {len(models)} bootstrap switch(es): " + f"{[m.serial_number for m in models]}" + ) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = ( + f"importBootstrap API call failed for " + f"{[m.serial_number for m in models]}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "bootstrap" + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_task_result() + + if not result.get("success"): + msg = ( + f"importBootstrap failed for " + f"{[m.serial_number for m in models]}: {response}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + log.info(f"importBootstrap API response success: {result.get('success')}") + log.debug("EXIT: _import_bootstrap_switches()") + + def _build_preprovision_model( + self, + switch_cfg: SwitchConfigModel, + poap_cfg: POAPConfigModel, + ) -> PreProvisionSwitchModel: + """Build a pre-provision model from POAP configuration. + + Args: + switch_cfg: Parent switch config. + poap_cfg: POAP config entry. + + Returns: + Completed ``PreProvisionSwitchModel`` for API submission. 
+ """ + log = self.ctx.log + log.debug( + f"ENTER: _build_preprovision_model(serial={poap_cfg.preprovision_serial})" + ) + + serial_number = poap_cfg.preprovision_serial + hostname = poap_cfg.hostname + ip = switch_cfg.seed_ip + model_name = poap_cfg.model + version = poap_cfg.version + image_policy = poap_cfg.image_policy + gateway_ip_mask = poap_cfg.config_data.gateway if poap_cfg.config_data else None + switch_role = switch_cfg.role + password = switch_cfg.password + auth_proto = SnmpV3AuthProtocol.MD5 # Pre-provision always uses MD5 + + discovery_username = getattr(poap_cfg, "discovery_username", None) + discovery_password = getattr(poap_cfg, "discovery_password", None) + + # Shared data block builder + data_block = build_poap_data_block(poap_cfg) + + preprov_model = PreProvisionSwitchModel( + serialNumber=serial_number, + hostname=hostname, + ip=ip, + model=model_name, + softwareVersion=version, + gatewayIpMask=gateway_ip_mask, + password=password, + discoveryAuthProtocol=auth_proto, + discoveryUsername=discovery_username, + discoveryPassword=discovery_password, + data=data_block, + imagePolicy=image_policy or None, + switchRole=switch_role, + ) + + log.debug( + f"EXIT: _build_preprovision_model() -> {preprov_model.serial_number}" + ) + return preprov_model + + def _preprovision_switches( + self, + models: List[PreProvisionSwitchModel], + ) -> None: + """Submit pre-provision switch models. + + Args: + models: ``PreProvisionSwitchModel`` objects to submit. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: _preprovision_switches()") + + endpoint = V1ManageFabricSwitchActionsPreProvisionPost() + endpoint.fabric_name = self.ctx.fabric + + request_model = PreProvisionSwitchesRequestModel(switches=models) + payload = request_model.to_payload() + + log.debug(f"preProvision endpoint: {endpoint.path}") + log.debug( + f"preProvision payload (masked): {mask_password(payload)}" + ) + log.info( + f"Pre-provisioning {len(models)} switch(es): " + f"{[m.serial_number for m in models]}" + ) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = ( + f"preProvision API call failed for " + f"{[m.serial_number for m in models]}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "preprovision" + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_task_result() + + if not result.get("success"): + msg = ( + f"preProvision failed for " + f"{[m.serial_number for m in models]}: {response}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + log.info(f"preProvision API response success: {result.get('success')}") + log.debug("EXIT: _preprovision_switches()") + + def _handle_poap_swap( + self, + swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]], + existing: List[SwitchDataModel], + ) -> None: + """Process POAP serial-swap entries. + + Args: + swap_entries: ``(SwitchConfigModel, POAPConfigModel)`` swap pairs. + existing: Current fabric inventory snapshot. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + fabric = self.ctx.fabric + + log.debug("ENTER: _handle_poap_swap()") + log.info(f"Processing {len(swap_entries)} POAP swap entries") + + # ------------------------------------------------------------------ + # Step 1: Validate preprovision serials exist in fabric inventory + # ------------------------------------------------------------------ + fabric_index: Dict[str, Dict[str, Any]] = { + sw.switch_id: sw.model_dump(by_alias=True) + for sw in existing + if sw.switch_id + } + log.debug( + f"Fabric inventory contains {len(fabric_index)} switch(es): " + f"{list(fabric_index.keys())}" + ) + + for switch_cfg, poap_cfg in swap_entries: + old_serial = poap_cfg.preprovision_serial + if old_serial not in fabric_index: + msg = ( + f"Pre-provisioned serial '{old_serial}' not found in " + f"fabric '{fabric}' inventory. The switch must be " + f"pre-provisioned before a swap can be performed." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + log.info( + f"Validated: pre-provisioned serial '{old_serial}' exists " + f"in fabric inventory" + ) + + # ------------------------------------------------------------------ + # Step 2: Validate new serials exist in bootstrap list + # ------------------------------------------------------------------ + bootstrap_switches = query_bootstrap_switches(nd, fabric, log) + bootstrap_index = build_bootstrap_index(bootstrap_switches) + log.debug( + f"Bootstrap list contains {len(bootstrap_index)} switch(es): " + f"{list(bootstrap_index.keys())}" + ) + + for switch_cfg, poap_cfg in swap_entries: + new_serial = poap_cfg.serial_number + if new_serial not in bootstrap_index: + msg = ( + f"New serial '{new_serial}' not found in the bootstrap " + f"(POAP) list for fabric '{fabric}'. The physical " + f"switch must be in the POAP loop before a swap can be " + f"performed." 
+ ) + log.error(msg) + nd.module.fail_json(msg=msg) + log.info( + f"Validated: new serial '{new_serial}' exists in " + f"bootstrap list" + ) + + # ------------------------------------------------------------------ + # Step 3: Call changeSwitchSerialNumber for each swap entry + # ------------------------------------------------------------------ + for switch_cfg, poap_cfg in swap_entries: + old_serial = poap_cfg.preprovision_serial + new_serial = poap_cfg.serial_number + + log.info( + f"Swapping serial for pre-provisioned switch: " + f"{old_serial} → {new_serial}" + ) + + endpoint = V1ManageFabricSwitchChangeSerialNumberPost() + endpoint.fabric_name = fabric + endpoint.switch_sn = old_serial + + request_body = ChangeSwitchSerialNumberRequestModel( + newSwitchId=new_serial + ) + payload = request_body.to_payload() + + log.debug(f"changeSwitchSerialNumber endpoint: {endpoint.path}") + log.debug(f"changeSwitchSerialNumber payload: {payload}") + + try: + nd.request( + path=endpoint.path, verb=endpoint.verb, data=payload + ) + except Exception as e: + msg = ( + f"changeSwitchSerialNumber API call failed for " + f"{old_serial} → {new_serial}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "swap_serial" + results.response_current = response + results.result_current = result + results.diff_current = { + "old_serial": old_serial, + "new_serial": new_serial, + } + results.register_task_result() + + if not result.get("success"): + msg = ( + f"Failed to swap serial number from {old_serial} " + f"to {new_serial}: {response}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + log.info( + f"Serial number swap successful: {old_serial} → {new_serial}" + ) + + # ------------------------------------------------------------------ + # Step 4: Re-query bootstrap API for post-swap data + # ------------------------------------------------------------------ + 
post_swap_bootstrap = query_bootstrap_switches(nd, fabric, log) + post_swap_index = build_bootstrap_index(post_swap_bootstrap) + log.debug( + f"Post-swap bootstrap list contains " + f"{len(post_swap_index)} switch(es)" + ) + + # ------------------------------------------------------------------ + # Step 5: Build BootstrapImportSwitchModels and POST importBootstrap + # ------------------------------------------------------------------ + import_models: List[BootstrapImportSwitchModel] = [] + for switch_cfg, poap_cfg in swap_entries: + new_serial = poap_cfg.serial_number + bootstrap_data = post_swap_index.get(new_serial) + + if not bootstrap_data: + msg = ( + f"Serial '{new_serial}' not found in bootstrap API " + f"response after swap. The controller may not have " + f"updated the bootstrap list yet." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + model = self._build_bootstrap_import_model( + switch_cfg, poap_cfg, bootstrap_data + ) + import_models.append(model) + log.info( + f"Built bootstrap model for swapped serial={new_serial}, " + f"hostname={model.hostname}, ip={model.ip}" + ) + + if not import_models: + log.warning("No bootstrap import models built after swap") + log.debug("EXIT: _handle_poap_swap()") + return + + try: + self._import_bootstrap_switches(import_models) + except Exception as e: + msg = ( + f"importBootstrap failed after serial swap: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + # ------------------------------------------------------------------ + # Step 6: Wait for manageability, save credentials, finalize + # ------------------------------------------------------------------ + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + for switch_cfg, poap_cfg in swap_entries: + switch_actions.append((poap_cfg.serial_number, switch_cfg)) + + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="swap", + skip_greenfield_check=True, + ) + + log.info( + f"POAP swap completed 
successfully for {len(swap_entries)} " + f"switch(es): {[sn for sn, _ in switch_actions]}" + ) + log.debug("EXIT: _handle_poap_swap()") + + +# ========================================================================= +# RMA Handler (Return Material Authorization) +# ========================================================================= + +class RMAHandler: + """Handle RMA workflows for switch replacement.""" + + def __init__( + self, + ctx: SwitchServiceContext, + fabric_ops: SwitchFabricOps, + wait_utils: SwitchWaitUtils, + ): + """Initialize the RMA workflow handler. + + Args: + ctx: Shared service context. + fabric_ops: Fabric operation service. + wait_utils: Switch wait utility service. + + Returns: + None. + """ + self.ctx = ctx + self.fabric_ops = fabric_ops + self.wait_utils = wait_utils + + def handle( + self, + proposed_config: List[SwitchConfigModel], + existing: List[SwitchDataModel], + ) -> None: + """Execute RMA processing for the provided switch configs. + + Args: + proposed_config: Validated switch configs for RMA operations. + existing: Current fabric inventory snapshot. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: RMAHandler.handle()") + log.info(f"Processing RMA for {len(proposed_config)} switch config(s)") + + # Check mode — preview only + if nd.module.check_mode: + log.info("Check mode: would run RMA provision") + results.action = "rma" + results.response_current = {"MESSAGE": "check mode — skipped"} + results.result_current = {"success": True, "changed": True} + results.diff_current = { + "rma_switches": [pc.seed_ip for pc in proposed_config] + } + results.register_task_result() + return + + # Collect (SwitchConfigModel, RMAConfigModel) pairs + rma_entries: List[Tuple[SwitchConfigModel, RMAConfigModel]] = [] + for switch_cfg in proposed_config: + if not switch_cfg.rma: + log.warning( + f"Switch config for {switch_cfg.seed_ip} has no RMA block — skipping" + ) + continue + for rma_cfg in switch_cfg.rma: + rma_entries.append((switch_cfg, rma_cfg)) + + if not rma_entries: + log.warning("No RMA entries found — nothing to process") + results.action = "rma" + results.response_current = {"MESSAGE": "no switches to process"} + results.result_current = {"success": True, "changed": False} + results.diff_current = {} + results.register_task_result() + return + + log.info(f"Found {len(rma_entries)} RMA entry/entries to process") + + # Validate old switches exist and are in correct state + old_switch_info = self._validate_prerequisites(rma_entries, existing) + + # Query bootstrap API for publicKey / fingerPrint of new switches + bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) + bootstrap_idx = build_bootstrap_index(bootstrap_switches) + log.debug( + f"Bootstrap index contains {len(bootstrap_idx)} switch(es): " + f"{list(bootstrap_idx.keys())}" + ) + + # Build and submit each RMA request + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + for switch_cfg, rma_cfg in rma_entries: + new_serial = rma_cfg.serial_number + bootstrap_data = 
bootstrap_idx.get(new_serial) + + if not bootstrap_data: + msg = ( + f"New switch serial {new_serial} not found in " + f"bootstrap API response. The switch is not in the " + f"POAP loop. Ensure the replacement switch is powered " + f"on and POAP/DHCP is enabled in the fabric." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + rma_model = self._build_rma_model( + switch_cfg, rma_cfg, bootstrap_data, + old_switch_info[rma_cfg.old_serial], + ) + log.info( + f"Built RMA model: replacing {rma_cfg.old_serial} with " + f"{rma_model.new_switch_id}" + ) + + self._provision_rma_switch(rma_cfg.old_serial, rma_model) + switch_actions.append((rma_model.new_switch_id, switch_cfg)) + + # Post-processing: wait, save credentials, finalize + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="RMA", + skip_greenfield_check=True, + ) + + log.debug("EXIT: RMAHandler.handle()") + + def _validate_prerequisites( + self, + rma_entries: List[Tuple[SwitchConfigModel, RMAConfigModel]], + existing: List[SwitchDataModel], + ) -> Dict[str, Dict[str, Any]]: + """Validate RMA prerequisites for each requested replacement. + + Args: + rma_entries: ``(SwitchConfigModel, RMAConfigModel)`` pairs. + existing: Current fabric inventory snapshot. + + Returns: + Dict keyed by old serial with prerequisite metadata. + """ + nd = self.ctx.nd + log = self.ctx.log + + log.debug("ENTER: _validate_prerequisites()") + + existing_by_serial: Dict[str, SwitchDataModel] = { + sw.serial_number: sw for sw in existing if sw.serial_number + } + + result: Dict[str, Dict[str, Any]] = {} + + for switch_cfg, rma_cfg in rma_entries: + old_serial = rma_cfg.old_serial + + old_switch = existing_by_serial.get(old_serial) + if old_switch is None: + nd.module.fail_json( + msg=( + f"RMA: old_serial '{old_serial}' not found in " + f"fabric '{self.ctx.fabric}'. The switch being " + f"replaced must exist in the inventory." 
+ ) + ) + + ad = old_switch.additional_data + if ad is None: + nd.module.fail_json( + msg=( + f"RMA: Switch '{old_serial}' has no additional data " + f"in the inventory response. Cannot verify discovery " + f"status and system mode." + ) + ) + + if ad.discovery_status != DiscoveryStatus.UNREACHABLE.value: + nd.module.fail_json( + msg=( + f"RMA: Switch '{old_serial}' has discovery status " + f"'{ad.discovery_status or 'unknown'}', " + f"expected 'unreachable'. The old switch must be " + f"unreachable before RMA can proceed." + ) + ) + + if ad.system_mode != SystemMode.MAINTENANCE.value: + nd.module.fail_json( + msg=( + f"RMA: Switch '{old_serial}' is in " + f"'{ad.system_mode or 'unknown'}' " + f"mode, expected 'maintenance'. Put the switch in " + f"maintenance mode before initiating RMA." + ) + ) + + result[old_serial] = { + "hostname": old_switch.hostname or "", + "switch_data": old_switch, + } + log.info( + f"RMA prerequisite check passed for old_serial " + f"'{old_serial}' (hostname={old_switch.hostname}, " + f"discovery={ad.discovery_status}, mode={ad.system_mode})" + ) + + log.debug("EXIT: _validate_prerequisites()") + return result + + def _build_rma_model( + self, + switch_cfg: SwitchConfigModel, + rma_cfg: RMAConfigModel, + bootstrap_data: Dict[str, Any], + old_switch_info: Dict[str, Any], + ) -> RMASwitchModel: + """Build an RMA model from config and bootstrap data. + + Args: + switch_cfg: Parent switch config. + rma_cfg: RMA config entry. + bootstrap_data: Bootstrap response entry for the replacement switch. + old_switch_info: Prerequisite metadata for the switch being replaced. + + Returns: + Completed ``RMASwitchModel`` for API submission. 
+ """ + log = self.ctx.log + log.debug( + f"ENTER: _build_rma_model(new={rma_cfg.serial_number}, " + f"old={rma_cfg.old_serial})" + ) + + # User config fields + new_switch_id = rma_cfg.serial_number + hostname = old_switch_info.get("hostname", "") + ip = switch_cfg.seed_ip + model_name = rma_cfg.model + version = rma_cfg.version + image_policy = rma_cfg.image_policy + gateway_ip_mask = rma_cfg.config_data.gateway + switch_role = switch_cfg.role + password = switch_cfg.password + auth_proto = SnmpV3AuthProtocol.MD5 # RMA always uses MD5 + + discovery_username = rma_cfg.discovery_username + discovery_password = rma_cfg.discovery_password + + # Bootstrap API response fields + public_key = bootstrap_data.get("publicKey", "") + finger_print = bootstrap_data.get( + "fingerPrint", bootstrap_data.get("fingerprint", "") + ) + + rma_model = RMASwitchModel( + gatewayIpMask=gateway_ip_mask, + model=model_name, + softwareVersion=version, + imagePolicy=image_policy, + switchRole=switch_role, + password=password, + discoveryAuthProtocol=auth_proto, + discoveryUsername=discovery_username, + discoveryPassword=discovery_password, + hostname=hostname, + ip=ip, + newSwitchId=new_switch_id, + publicKey=public_key, + fingerPrint=finger_print, + ) + + log.debug( + f"EXIT: _build_rma_model() -> newSwitchId={rma_model.new_switch_id}" + ) + return rma_model + + def _provision_rma_switch( + self, + old_switch_id: str, + rma_model: RMASwitchModel, + ) -> None: + """Submit an RMA provisioning request for one switch. + + Args: + old_switch_id: Identifier of the switch being replaced. + rma_model: RMA model for the replacement switch. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: _provision_rma_switch()") + + endpoint = V1ManageFabricSwitchProvisionRMAPost() + endpoint.fabric_name = self.ctx.fabric + endpoint.switch_id = old_switch_id + + payload = rma_model.to_payload() + + log.info(f"RMA: Replacing {old_switch_id} with {rma_model.new_switch_id}") + log.debug(f"RMA endpoint: {endpoint.path}") + log.debug(f"RMA payload (masked): {mask_password(payload)}") + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = ( + f"RMA provision API call failed for " + f"{old_switch_id} → {rma_model.new_switch_id}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "rma" + results.response_current = response + results.result_current = result + results.diff_current = { + "old_switch_id": old_switch_id, + "new_switch_id": rma_model.new_switch_id, + } + results.register_task_result() + + if not result.get("success"): + msg = ( + f"RMA provision failed for {old_switch_id} → " + f"{rma_model.new_switch_id}: {response}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + log.info(f"RMA provision API response success: {result.get('success')}") + log.debug("EXIT: _provision_rma_switch()") + + +# ========================================================================= +# Orchestrator (Thin State Router) +# ========================================================================= + +class NDSwitchResourceModule(): + """Orchestrate switch lifecycle management across supported states.""" + + # ===================================================================== + # Initialization & Lifecycle + # ===================================================================== + + def __init__( + self, + nd: NDModule, + results: Results, + logger: Optional[logging.Logger] = None, + ): + """Initialize module state, 
services, and inventory snapshots. + + Args: + nd: ND module wrapper. + results: Shared results aggregator. + logger: Optional logger instance. + + Returns: + None. + """ + log = logger or logging.getLogger("nd.NDSwitchResourceModule") + self.log = log + self.nd = nd + self.module = nd.module + self.results = results + + # Module parameters + self.config = self.module.params.get("config", {}) + self.fabric = self.module.params.get("fabric") + self.state = self.module.params.get("state") + + # Shared context for service classes + self.ctx = SwitchServiceContext( + nd=nd, + results=results, + fabric=self.fabric, + log=log, + save_config=self.module.params.get("save", True), + deploy_config=self.module.params.get("deploy", True), + ) + + # Switch collections + try: + self.proposed: List[SwitchDataModel] = [] + self.existing: List[SwitchDataModel] = [ + SwitchDataModel.model_validate(sw) + for sw in self._query_all_switches() + ] + self.previous: List[SwitchDataModel] = deepcopy(self.existing) + except Exception as e: + msg = ( + f"Failed to query fabric '{self.fabric}' inventory " + f"during initialization: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + # Operation tracking + self.nd_logs: List[Dict[str, Any]] = [] + + # Utility instances (SwitchWaitUtils / FabricUtils depend on self) + self.fabric_utils = FabricUtils(self.nd, self.fabric, log) + self.wait_utils = SwitchWaitUtils( + self, self.fabric, log, fabric_utils=self.fabric_utils + ) + + # Service instances (Dependency Injection) + self.discovery = SwitchDiscoveryService(self.ctx) + self.fabric_ops = SwitchFabricOps(self.ctx, self.fabric_utils) + self.poap_handler = POAPHandler(self.ctx, self.fabric_ops, self.wait_utils) + self.rma_handler = RMAHandler(self.ctx, self.fabric_ops, self.wait_utils) + + log.info(f"Initialized NDSwitchResourceModule for fabric: {self.fabric}") + + def exit_json(self) -> None: + """Finalize collected results and exit the Ansible module. 
+ + Includes operation logs and previous/current inventory snapshots in the + final response payload. + + Returns: + None. + """ + self.results.build_final_result() + final = self.results.final_result + + final["logs"] = self.nd_logs + final["previous"] = ( + [sw.model_dump(by_alias=True) for sw in self.previous] + if self.previous + else [] + ) + final["current"] = ( + [sw.model_dump(by_alias=True) for sw in self.existing] + if self.existing + else [] + ) + + if True in self.results.failed: + self.nd.module.fail_json(**final) + self.nd.module.exit_json(**final) + + # ===================================================================== + # Public API – State Management + # ===================================================================== + + def manage_state(self) -> None: + """Dispatch the requested module state to the appropriate workflow. + + This method validates input, routes POAP and RMA operations to dedicated + handlers, and executes state-specific orchestration for query, merged, + overridden, and deleted operations. + + Returns: + None. + """ + self.log.info(f"Managing state: {self.state}") + + # query / deleted — config is optional + if self.state in ("query", "deleted"): + proposed_config = ( + SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) + if self.config + else None + ) + if self.state == "deleted": + return self._handle_deleted_state(proposed_config) + return self._handle_query_state(proposed_config) + + # merged / overridden — config is required + if not self.config: + self.nd.module.fail_json( + msg=f"'config' is required for '{self.state}' state." 
+ ) + + proposed_config = SwitchDiffEngine.validate_configs( + self.config, self.state, self.nd, self.log + ) + self.operation_type = proposed_config[0].operation_type + + # POAP and RMA bypass normal discovery — delegate to handlers + if self.operation_type == "poap": + return self.poap_handler.handle(proposed_config, self.existing) + if self.operation_type == "rma": + return self.rma_handler.handle(proposed_config, self.existing) + + # Normal: discover → build proposed models → compute diff → delegate + discovered_data = self.discovery.discover(proposed_config) + self.proposed = self.discovery.build_proposed( + proposed_config, discovered_data, self.existing + ) + diff = SwitchDiffEngine.compute_changes( + self.proposed, self.existing, self.log + ) + + state_handlers = { + "merged": self._handle_merged_state, + "overridden": self._handle_overridden_state, + } + handler = state_handlers.get(self.state) + if handler is None: + self.nd.module.fail_json(msg=f"Unsupported state: {self.state}") + return handler(diff, proposed_config, discovered_data) + + # ===================================================================== + # State Handlers (orchestration only — delegate to services) + # ===================================================================== + + def _handle_query_state( + self, + proposed_config: Optional[List[SwitchConfigModel]] = None, + ) -> None: + """Return inventory switches matching the optional proposed config. + + Args: + proposed_config: Optional filter config list for matching switches. + + Returns: + None. 
+ """ + self.log.debug("ENTER: _handle_query_state()") + self.log.info("Handling query state") + self.log.debug(f"Found {len(self.existing)} existing switches") + + if proposed_config is None: + matched_switches = list(self.existing) + self.log.info("No proposed config — returning all existing switches") + else: + matched_switches: List[SwitchDataModel] = [] + for cfg in proposed_config: + match = next( + ( + sw for sw in self.existing + if sw.fabric_management_ip == cfg.seed_ip + ), + None, + ) + if match is None: + self.log.info(f"Switch {cfg.seed_ip} not found in fabric") + continue + + if cfg.role is not None and match.switch_role != cfg.role: + self.log.info( + f"Switch {cfg.seed_ip} found but role mismatch: " + f"expected {cfg.role.value}, got " + f"{match.switch_role.value if match.switch_role else 'None'}" + ) + continue + + matched_switches.append(match) + + self.log.info( + f"Matched {len(matched_switches)}/{len(proposed_config)} " + f"switch(es) from proposed config" + ) + + switch_data = [sw.model_dump(by_alias=True) for sw in matched_switches] + + self.results.action = "query" + self.results.state = self.state + self.results.check_mode = self.nd.module.check_mode + self.results.operation_type = OperationType.QUERY + self.results.response_current = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": switch_data, + } + self.results.result_current = { + "found": len(matched_switches) > 0, + "success": True, + } + self.results.diff_current = {} + self.results.register_task_result() + + self.log.debug(f"Returning {len(switch_data)} switches in results") + self.log.debug("EXIT: _handle_query_state()") + + def _handle_merged_state( + self, + diff: Dict[str, List[SwitchDataModel]], + proposed_config: List[SwitchConfigModel], + discovered_data: Optional[Dict[str, Dict[str, Any]]] = None, + ) -> None: + """Handle merged-state add and migration workflows. + + Args: + diff: Categorized switch diff output. + proposed_config: Validated switch config list. 
+ discovered_data: Optional discovery data by seed IP. + + Returns: + None. + """ + self.log.debug("ENTER: _handle_merged_state()") + self.log.info("Handling merged state") + self.log.debug(f"Proposed configs: {len(self.proposed)}") + self.log.debug(f"Existing switches: {len(self.existing)}") + + if not self.proposed: + self.log.info("No configurations provided for merged state") + self.log.debug("EXIT: _handle_merged_state() - no configs") + return + + config_by_ip = {sw.seed_ip: sw for sw in proposed_config} + + # Phase 1: Log idempotent switches + for sw in diff.get("idempotent", []): + self.log.info( + f"Switch {sw.fabric_management_ip} ({sw.switch_id}) " + f"is idempotent - no changes needed" + ) + + # Phase 2: Warn about to_update (merged state doesn't support updates) + if diff.get("to_update"): + ips = [sw.fabric_management_ip for sw in diff["to_update"]] + self.log.warning( + f"Switches require updates which is not supported in merged state. " + f"Use overridden state for updates. 
Affected switches: {ips}" + ) + + switches_to_add = diff.get("to_add", []) + migration_switches = diff.get("migration_mode", []) + + if not switches_to_add and not migration_switches: + self.log.info("No switches need adding or migration processing") + return + + # Check mode — preview only + if self.nd.module.check_mode: + self.log.info( + f"Check mode: would add {len(switches_to_add)} and " + f"process {len(migration_switches)} migration switches" + ) + self.results.action = "merge" + self.results.state = self.state + self.results.operation_type = OperationType.CREATE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": True} + self.results.diff_current = { + "to_add": [sw.fabric_management_ip for sw in switches_to_add], + "migration_mode": [sw.fabric_management_ip for sw in migration_switches], + } + self.results.register_task_result() + return + + # Collect (serial_number, SwitchConfigModel) pairs for post-processing + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + + # Phase 3: Bulk add new switches to fabric + if switches_to_add and discovered_data: + add_configs = [] + for sw in switches_to_add: + cfg = config_by_ip.get(sw.fabric_management_ip) + if cfg: + add_configs.append(cfg) + else: + self.log.warning( + f"No config found for switch {sw.fabric_management_ip}, skipping add" + ) + + if add_configs: + credential_groups = group_switches_by_credentials(add_configs, self.log) + for group_key, group_switches in credential_groups.items(): + username, password_hash, auth_proto, platform_type, preserve_config = group_key + password = group_switches[0].password + + pairs = [] + for cfg in group_switches: + disc = discovered_data.get(cfg.seed_ip) + if disc: + pairs.append((cfg, disc)) + else: + self.log.warning(f"No discovery data for {cfg.seed_ip}, skipping") + + if not pairs: + continue + + self.fabric_ops.bulk_add( + switches=pairs, + username=username, + 
password=password, + auth_proto=auth_proto, + platform_type=platform_type, + preserve_config=preserve_config, + ) + + for cfg, disc in pairs: + sn = disc.get("serialNumber") + if sn: + switch_actions.append((sn, cfg)) + self._log_operation("add", cfg.seed_ip) + + # Phase 4: Collect migration switches for post-processing + for mig_sw in migration_switches: + cfg = config_by_ip.get(mig_sw.fabric_management_ip) + if cfg and mig_sw.switch_id: + switch_actions.append((mig_sw.switch_id, cfg)) + self._log_operation("migrate", mig_sw.fabric_management_ip) + + if not switch_actions: + self.log.info("No switch actions to process after add/migration collection") + return + + # Common post-processing for all switches (new + migration) + # Brownfield optimisation: if every switch in this batch uses + # preserve_config=True the switches will NOT reload after being + # added to the fabric. Passing this flag lets the wait utility + # skip the unreachable/reload detection phases. + all_preserve_config = all( + cfg.preserve_config for _, cfg in switch_actions + ) + if all_preserve_config: + self.log.info( + "All switches in batch are brownfield (preserve_config=True) — " + "reload detection will be skipped" + ) + + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="merged", + all_preserve_config=all_preserve_config, + update_roles=True, + ) + + self.log.debug("EXIT: _handle_merged_state() - completed") + + def _handle_overridden_state( + self, + diff: Dict[str, List[SwitchDataModel]], + proposed_config: List[SwitchConfigModel], + discovered_data: Optional[Dict[str, Dict[str, Any]]] = None, + ) -> None: + """Handle overridden-state reconciliation for the fabric. + + Args: + diff: Categorized switch diff output. + proposed_config: Validated switch config list. + discovered_data: Optional discovery data by seed IP. + + Returns: + None. 
+ """ + self.log.debug("ENTER: _handle_overridden_state()") + self.log.info("Handling overridden state") + + if not self.proposed: + self.log.warning("No configurations provided for overridden state") + return + + # Check mode — preview only + if self.nd.module.check_mode: + n_delete = len(diff.get("to_delete", [])) + n_update = len(diff.get("to_update", [])) + n_add = len(diff.get("to_add", [])) + n_migrate = len(diff.get("migration_mode", [])) + self.log.info( + f"Check mode: would delete {n_delete}, " + f"delete-and-re-add {n_update}, " + f"add {n_add}, migrate {n_migrate}" + ) + would_change = (n_delete + n_update + n_add + n_migrate) > 0 + self.results.action = "override" + self.results.state = self.state + self.results.operation_type = OperationType.CREATE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": would_change} + self.results.diff_current = { + "to_delete": n_delete, + "to_update": n_update, + "to_add": n_add, + "migration_mode": n_migrate, + } + self.results.register_task_result() + return + + switches_to_delete: List[SwitchDataModel] = [] + + # Phase 1: Switches not in proposed config + for sw in diff.get("to_delete", []): + self.log.info( + f"Marking for deletion (not in proposed): " + f"{sw.fabric_management_ip} ({sw.switch_id})" + ) + switches_to_delete.append(sw) + self._log_operation("delete", sw.fabric_management_ip) + + # Phase 2: Switches that need updating (delete-then-re-add) + for sw in diff.get("to_update", []): + existing_sw = next( + (e for e in self.existing + if e.switch_id == sw.switch_id + or e.fabric_management_ip == sw.fabric_management_ip), + None, + ) + if existing_sw: + self.log.info( + f"Marking for deletion (re-add update): " + f"{existing_sw.fabric_management_ip} ({existing_sw.switch_id})" + ) + switches_to_delete.append(existing_sw) + self._log_operation("delete_for_update", existing_sw.fabric_management_ip) + + 
diff["to_add"].append(sw) + + if switches_to_delete: + try: + self.fabric_ops.bulk_delete(switches_to_delete) + except SwitchOperationError as e: + msg = ( + f"Failed to delete switches during overridden state: {e}" + ) + self.log.error(msg) + self.nd.module.fail_json(msg=msg) + + diff["to_update"] = [] + + # Phase 3: Delegate add + migration to merged state + self._handle_merged_state(diff, proposed_config, discovered_data) + self.log.debug("EXIT: _handle_overridden_state()") + + def _handle_deleted_state( + self, + proposed_config: Optional[List[SwitchConfigModel]] = None, + ) -> None: + """Handle deleted-state switch removal. + + Args: + proposed_config: Optional config list that limits deletion scope. + + Returns: + None. + """ + self.log.debug("ENTER: _handle_deleted_state()") + self.log.info("Handling deleted state") + + if proposed_config is None: + switches_to_delete = list(self.existing) + self.log.info( + f"No proposed config — targeting all {len(switches_to_delete)} " + f"existing switch(es) for deletion" + ) + for sw in switches_to_delete: + self._log_operation("delete", sw.fabric_management_ip) + else: + switches_to_delete: List[SwitchDataModel] = [] + for switch_config in proposed_config: + identifier = switch_config.seed_ip + self.log.debug(f"Looking for switch to delete with seed IP: {identifier}") + existing_switch = next( + (sw for sw in self.existing if sw.fabric_management_ip == identifier), + None, + ) + if existing_switch: + self.log.info( + f"Marking for deletion: {identifier} ({existing_switch.switch_id})" + ) + switches_to_delete.append(existing_switch) + else: + self.log.info(f"Switch not found for deletion: {identifier}") + + self.log.info(f"Total switches marked for deletion: {len(switches_to_delete)}") + if not switches_to_delete: + self.log.info("No switches to delete") + return + + # Check mode — preview only + if self.nd.module.check_mode: + self.log.info(f"Check mode: would delete {len(switches_to_delete)} switch(es)") + 
self.results.action = "delete" + self.results.state = self.state + self.results.operation_type = OperationType.DELETE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": True} + self.results.diff_current = { + "to_delete": [sw.fabric_management_ip for sw in switches_to_delete], + } + self.results.register_task_result() + return + + self.log.info( + f"Proceeding to delete {len(switches_to_delete)} switch(es) from fabric" + ) + self.fabric_ops.bulk_delete(switches_to_delete) + self.log.debug("EXIT: _handle_deleted_state()") + + # ===================================================================== + # Query Helpers + # ===================================================================== + + def _query_all_switches(self) -> List[Dict[str, Any]]: + """Query all switches from the fabric inventory API. + + Returns: + List of raw switch dictionaries returned by the controller. + """ + endpoint = V1ManageFabricSwitchesGet() + endpoint.fabric_name = self.fabric + self.log.debug(f"Querying all switches with endpoint: {endpoint.path}") + self.log.debug(f"Query verb: {endpoint.verb}") + + try: + result = self.nd.request(path=endpoint.path, verb=endpoint.verb) + except Exception as e: + msg = ( + f"Failed to query switches from " + f"fabric '{self.fabric}': {e}" + ) + self.log.error(msg) + self.nd.module.fail_json(msg=msg) + + if isinstance(result, list): + switches = result + elif isinstance(result, dict): + switches = result.get("switches", []) + else: + switches = [] + + self.log.debug(f"Queried {len(switches)} switches from fabric {self.fabric}") + return switches + + # ===================================================================== + # Operation Tracking + # ===================================================================== + + def _log_operation(self, operation: str, identifier: str) -> None: + """Append a successful operation record to the module log. 
+ + Args: + operation: Operation label. + identifier: Switch identifier for the operation. + + Returns: + None. + """ + self.nd_logs.append({ + "operation": operation, + "identifier": identifier, + "status": "success", + }) diff --git a/plugins/module_utils/utils/nd_manage_switches/__init__.py b/plugins/module_utils/utils/nd_manage_switches/__init__.py new file mode 100644 index 00000000..ff3d215b --- /dev/null +++ b/plugins/module_utils/utils/nd_manage_switches/__init__.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2025, Akshayant Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""nd_manage_switches utilities package. + +Re-exports all utility classes, functions, and exceptions so that +consumers can import directly from the package: + + from .utils.nd_manage_switches import ( + SwitchOperationError, PayloadUtils, FabricUtils, SwitchWaitUtils, + mask_password, get_switch_field, determine_operation_type, + group_switches_by_credentials, query_bootstrap_switches, + build_bootstrap_index, build_poap_data_block, + ) +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from .exceptions import SwitchOperationError # noqa: F401 +from .payload_utils import PayloadUtils, mask_password # noqa: F401 +from .fabric_utils import FabricUtils # noqa: F401 +from .switch_wait_utils import SwitchWaitUtils # noqa: F401 +from .switch_helpers import ( # noqa: F401 + get_switch_field, + determine_operation_type, + group_switches_by_credentials, +) +from .bootstrap_utils import ( # noqa: F401 + query_bootstrap_switches, + build_bootstrap_index, + build_poap_data_block, +) + + +__all__ = [ + "SwitchOperationError", + "PayloadUtils", + "FabricUtils", + "SwitchWaitUtils", + "mask_password", + "get_switch_field", + "determine_operation_type", + "group_switches_by_credentials", + "query_bootstrap_switches", + "build_bootstrap_index", + 
"build_poap_data_block", +] diff --git a/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py b/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py new file mode 100644 index 00000000..1356428a --- /dev/null +++ b/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Bootstrap API helpers for POAP switch queries, serial-number indexing, and payload construction.""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +from typing import Any, Dict, List, Optional + +from ...endpoints.v1.nd_manage_switches.manage_fabric_bootstrap import ( + V1ManageFabricBootstrapGet, +) + + +def query_bootstrap_switches( + nd, + fabric: str, + log: logging.Logger, +) -> List[Dict[str, Any]]: + """GET switches currently in the bootstrap (POAP / PnP) loop. + + Args: + nd: NDModule instance (REST client). + fabric: Fabric name. + log: Logger. + + Returns: + List of raw switch dicts from the bootstrap API. 
def build_bootstrap_index(
    bootstrap_switches: List[Dict[str, Any]],
) -> Dict[str, Dict[str, Any]]:
    """Build a serial-number-keyed index from bootstrap API data.

    Accepts either the camelCase ``serialNumber`` or snake_case
    ``serial_number`` key (preferring the first that is non-empty).
    Entries that carry no usable serial are skipped — previously they
    were all indexed under an empty-string key, silently overwriting
    one another.

    Args:
        bootstrap_switches: Raw switch dicts from the bootstrap API.

    Returns:
        Dict mapping ``serial_number`` -> switch dict. On duplicate
        serials the last entry wins (unchanged from prior behavior).
    """
    index: Dict[str, Dict[str, Any]] = {}
    for sw in bootstrap_switches:
        # Prefer camelCase but fall back when it is missing or empty.
        serial = sw.get("serialNumber") or sw.get("serial_number")
        if not serial:
            # No serial — cannot be addressed by swap/RMA lookups; skip.
            continue
        index[serial] = sw
    return index
def build_poap_data_block(poap_cfg) -> Optional[Dict[str, Any]]:
    """Build optional data block for bootstrap and pre-provision models.

    Args:
        poap_cfg: ``POAPConfigModel`` from the user playbook.

    Returns:
        Data block dict, or ``None`` if no ``config_data`` is present.
    """
    config = poap_cfg.config_data
    if not config:
        return None
    # Keep only the fields that actually carry a value.
    block: Dict[str, Any] = {
        key: value
        for key, value in (
            ("gatewayIpMask", config.gateway),
            ("models", config.models),
        )
        if value
    }
    return block or None


class SwitchOperationError(Exception):
    """Raised when a switch operation fails."""
class FabricUtils:
    """Fabric-level operations: config save, deploy, and info retrieval."""

    def __init__(
        self,
        nd_module,
        fabric: str,
        logger: Optional[logging.Logger] = None,
    ):
        """Initialize FabricUtils.

        Args:
            nd_module: NDModule or NDNetworkResourceModule instance.
            fabric: Fabric name.
            logger: Optional logger; defaults to ``nd.FabricUtils``.
        """
        self.nd = nd_module
        self.fabric = fabric
        self.log = logger or logging.getLogger("nd.FabricUtils")

        # Pre-configure endpoints once; every public method reuses them.
        self.ep_config_save = V1ManageFabricConfigSavePost()
        self.ep_config_save.fabric_name = fabric

        self.ep_config_deploy = V1ManageFabricConfigDeployPost()
        self.ep_config_deploy.fabric_name = fabric

        self.ep_fabric_get = V1ManageFabricGet()
        self.ep_fabric_get.fabric_name = fabric

    # -----------------------------------------------------------------
    # Public API
    # -----------------------------------------------------------------

    def save_config(
        self,
        max_retries: int = 3,
        retry_delay: int = 600,
    ) -> Dict[str, Any]:
        """Save (recalculate) fabric configuration.

        Retries up to ``max_retries`` times with ``retry_delay`` seconds
        between attempts.

        Args:
            max_retries: Maximum number of attempts (default ``3``).
            retry_delay: Seconds to wait between failed attempts
                (default ``600``).

        Returns:
            API response dict from the first successful attempt.

        Raises:
            SwitchOperationError: If all attempts fail.
        """
        # Track only the most recent failure; the original eagerly built a
        # throwaway SwitchOperationError instance just to seed this variable.
        last_error: Optional[Exception] = None
        for attempt in range(1, max_retries + 1):
            try:
                response = self._request_endpoint(
                    self.ep_config_save, action="Config save"
                )
            except SwitchOperationError as exc:
                last_error = exc
                # Lazy %-args: message rendered only when the level is enabled.
                self.log.warning(
                    "Config save attempt %d/%d failed for fabric %s: %s",
                    attempt, max_retries, self.fabric, exc,
                )
                if attempt < max_retries:
                    self.log.info(
                        "Retrying config save in %ds (attempt %d/%d)",
                        retry_delay, attempt + 1, max_retries,
                    )
                    time.sleep(retry_delay)
            else:
                self.log.info(
                    "Config save succeeded on attempt %d/%d for fabric %s",
                    attempt, max_retries, self.fabric,
                )
                return response
        raise SwitchOperationError(
            f"Config save failed after {max_retries} attempt(s) "
            f"for fabric {self.fabric}: {last_error}"
        )

    def deploy_config(self) -> Dict[str, Any]:
        """Deploy pending configuration to all switches in the fabric.

        The ``configDeploy`` endpoint requires no request body; it deploys
        all pending changes for the fabric.

        Returns:
            API response dict.

        Raises:
            SwitchOperationError: If the deploy request fails.
        """
        return self._request_endpoint(self.ep_config_deploy, action="Config deploy")

    def get_fabric_info(self) -> Dict[str, Any]:
        """Retrieve fabric information.

        Returns:
            Fabric information dict.

        Raises:
            SwitchOperationError: If the request fails.
        """
        return self._request_endpoint(self.ep_fabric_get, action="Get fabric info")

    # -----------------------------------------------------------------
    # Internal helpers
    # -----------------------------------------------------------------

    def _request_endpoint(self, endpoint, action: str = "Request") -> Dict[str, Any]:
        """Execute a request against a pre-configured endpoint.

        Args:
            endpoint: Endpoint object with ``.path`` and ``.verb``.
            action: Human-readable label for log messages.

        Returns:
            API response dict.

        Raises:
            SwitchOperationError: On any request failure.
        """
        self.log.info("%s for fabric: %s", action, self.fabric)
        try:
            response = self.nd.request(endpoint.path, verb=endpoint.verb)
        except Exception as e:  # noqa: BLE001 - wrap any transport error uniformly
            self.log.error("%s failed for fabric %s: %s", action, self.fabric, e)
            raise SwitchOperationError(
                f"{action} failed for fabric {self.fabric}: {e}"
            ) from e
        self.log.info("%s completed for fabric: %s", action, self.fabric)
        return response
def mask_password(payload: Dict[str, Any]) -> Dict[str, Any]:
    """Return a deep copy of *payload* with password fields masked.

    Useful for safe logging of API payloads that contain credentials.

    Args:
        payload: API payload dict (may contain ``password`` keys).

    Returns:
        Copy with every ``password`` value replaced by ``"********"``.
    """
    redacted = deepcopy(payload)
    # Collect every dict that may carry a credential: the top level plus
    # each dict entry of an optional "switches" list.
    candidates = [redacted]
    switches = redacted.get("switches")
    if isinstance(switches, list):
        candidates.extend(entry for entry in switches if isinstance(entry, dict))
    for entry in candidates:
        if "password" in entry:
            entry["password"] = "********"
    return redacted


class PayloadUtils:
    """Stateless helper for building ND Switch Resource API request payloads."""

    def __init__(self, logger: Optional[logging.Logger] = None):
        """Initialize PayloadUtils.

        Args:
            logger: Optional logger; defaults to ``nd.PayloadUtils``.
        """
        self.log = logger or logging.getLogger("nd.PayloadUtils")

    def build_credentials_payload(
        self,
        serial_numbers: List[str],
        username: str,
        password: str,
    ) -> Dict[str, Any]:
        """Build payload for saving switch credentials.

        Args:
            serial_numbers: Switch serial numbers.
            username: Switch username.
            password: Switch password.

        Returns:
            Credentials API payload dict.
        """
        payload: Dict[str, Any] = dict(
            switchIds=serial_numbers,
            username=username,
            password=password,
        )
        return payload

    def build_switch_ids_payload(
        self,
        serial_numbers: List[str],
    ) -> Dict[str, Any]:
        """Build payload with switch IDs for remove / batch operations.

        Args:
            serial_numbers: Switch serial numbers.

        Returns:
            ``{"switchIds": [...]}`` payload dict.
        """
        return dict(switchIds=serial_numbers)


def _snake_to_camel(snake: str) -> str:
    """Convert ``snake_case`` to ``camelCase`` (first word left untouched)."""
    head, *tail = snake.split('_')
    return head + ''.join(part.capitalize() for part in tail)


def get_switch_field(
    switch,
    field_names: List[str],
) -> Optional[Any]:
    """Extract a field value from a switch config, trying multiple names.

    Supports Pydantic models and plain dicts with both snake_case and
    camelCase key lookups.

    Args:
        switch: Switch model or dict to extract from.
        field_names: Candidate field names to try, in priority order.

    Returns:
        First non-``None`` value found, or ``None``.
    """
    for candidate in field_names:
        if hasattr(switch, candidate):
            attr_value = getattr(switch, candidate)
            if attr_value is not None:
                return attr_value
        elif isinstance(switch, dict):
            # Try the snake_case key first, then its camelCase variant.
            for key in (candidate, _snake_to_camel(candidate)):
                if switch.get(key) is not None:
                    return switch[key]
    return None
def determine_operation_type(switch) -> str:
    """Determine the operation type from switch configuration.

    Args:
        switch: A ``SwitchConfigModel``, ``SwitchDiscoveryModel``,
            or raw dict.

    Returns:
        ``'normal'``, ``'poap'``, or ``'rma'``.
    """
    # Pydantic models expose the operation type directly.
    if hasattr(switch, 'operation_type'):
        return switch.operation_type

    if isinstance(switch, dict):
        keys = set(switch)
        if keys & {'poap', 'bootstrap'}:
            return 'poap'
        if keys & {'rma', 'old_serial', 'oldSerial'}:
            return 'rma'

    return 'normal'


def group_switches_by_credentials(
    switches,
    log: logging.Logger,
) -> Dict[Tuple, list]:
    """Group switches by shared credentials for bulk API operations.

    Args:
        switches: Validated ``SwitchConfigModel`` instances.
        log: Logger.

    Returns:
        Dict mapping a ``(username, password_hash, auth_proto,
        platform_type, preserve_config)`` tuple to the list of switches
        sharing those credentials.
    """
    grouped: Dict[Tuple, list] = {}
    for entry in switches:
        # Hash the password so the grouping key never holds the secret.
        key = (
            entry.user_name,
            hash(entry.password),
            entry.auth_proto,
            entry.platform_type,
            entry.preserve_config,
        )
        grouped.setdefault(key, []).append(entry)

    log.info(
        f"Grouped {len(switches)} switches into "
        f"{len(grouped)} credential group(s)"
    )

    for position, (key, members) in enumerate(grouped.items(), 1):
        username, _, proto, platform, preserve_config = key
        auth_value = proto.value if hasattr(proto, 'value') else str(proto)
        platform_value = (
            platform.value if hasattr(platform, 'value') else str(platform)
        )
        log.debug(
            f"Group {position}: {len(members)} switches with "
            f"username={username}, auth={auth_value}, "
            f"platform={platform_value}, "
            f"preserve_config={preserve_config}"
        )

    return grouped
class SwitchWaitUtils:
    """Multi-phase wait utilities for switch lifecycle operations.

    Polls the fabric switches API until target switches reach a manageable
    state, handling migration mode, greenfield/brownfield shortcuts, and
    rediscovery.
    """

    # Polling defaults: up to 300 polls, 5 seconds apart.
    DEFAULT_MAX_ATTEMPTS: int = 300
    DEFAULT_WAIT_INTERVAL: int = 5  # seconds

    # Discovery statuses meaning the switch is ready for management.
    MANAGEABLE_STATUSES = frozenset({"ok", "manageable"})

    # Discovery statuses meaning an operation is still running.
    IN_PROGRESS_STATUSES = frozenset({
        "inProgress", "migration", "discovering", "rediscovering",
    })

    # Discovery statuses meaning the operation failed outright.
    FAILED_STATUSES = frozenset({
        "failed",
        "unreachable",
        "authenticationFailed",
        "timeout",
        "discoveryTimeout",
        "notReacheable",  # Note: typo matches the API spec
        "notAuthorized",
        "unknownUserPassword",
        "connectionError",
        "sshSessionError",
    })

    # Per-phase sleep multipliers applied to ``wait_interval``.
    _MIGRATION_SLEEP_FACTOR: float = 2.0
    _REDISCOVERY_SLEEP_FACTOR: float = 3.5

    def __init__(
        self,
        nd_module,
        fabric: str,
        logger: Optional[logging.Logger] = None,
        max_attempts: Optional[int] = None,
        wait_interval: Optional[int] = None,
        fabric_utils: Optional["FabricUtils"] = None,
    ):
        """Initialize SwitchWaitUtils.

        Args:
            nd_module: Parent module instance (must expose ``.nd``).
            fabric: Fabric name.
            logger: Optional logger; defaults to ``nd.SwitchWaitUtils``.
            max_attempts: Max polling iterations (default ``300``).
            wait_interval: Seconds between polls (default ``5``).
            fabric_utils: Optional ``FabricUtils`` instance for fabric
                info queries. Created internally if not provided.
        """
        self.nd = nd_module.nd
        self.fabric = fabric
        self.log = logger or logging.getLogger("nd.SwitchWaitUtils")
        self.max_attempts = max_attempts or self.DEFAULT_MAX_ATTEMPTS
        self.wait_interval = wait_interval or self.DEFAULT_WAIT_INTERVAL
        self.fabric_utils = fabric_utils or FabricUtils(nd_module, fabric, self.log)

        # Endpoints are fabric-scoped, so configure each of them up front.
        self.ep_switches_get = V1ManageFabricSwitchesGet()
        self.ep_switches_get.fabric_name = fabric

        self.ep_inventory_discover = V1ManageFabricInventoryDiscoverGet()
        self.ep_inventory_discover.fabric_name = fabric

        self.ep_rediscover = V1ManageFabricSwitchActionsRediscoverPost()
        self.ep_rediscover.fabric_name = fabric

        # Greenfield flag lookup is cached after the first query.
        self._greenfield_debug_enabled: Optional[bool] = None
+ """ + self.log.info( + f"Waiting for switches to become manageable: {serial_numbers}" + ) + + # Phase 1 + 2: migration → normal + if not self._wait_for_system_mode(serial_numbers): + return False + + # Phase 3: brownfield shortcut — no reload expected + if all_preserve_config: + self.log.info( + "All switches are brownfield (preserve_config=True) — " + "skipping reload detection (phases 5-6)" + ) + return True + + # Phase 4: greenfield shortcut (skipped for POAP bootstrap) + if ( + not skip_greenfield_check + and self._is_greenfield_debug_enabled() + ): + self.log.info( + "Greenfield debug flag enabled — " + "skipping reload detection" + ) + return True + + if skip_greenfield_check: + self.log.info( + "Greenfield debug check skipped " + "(POAP bootstrap — device always reboots)" + ) + + # Phase 5: wait for "unreachable" (switch is reloading) + if not self._wait_for_discovery_state( + serial_numbers, "unreachable" + ): + return False + + # Phase 6: wait for "ok" (switch is ready) + return self._wait_for_discovery_state( + serial_numbers, "ok" + ) + + def wait_for_discovery( + self, + seed_ip: str, + max_attempts: Optional[int] = None, + wait_interval: Optional[int] = None, + ) -> Optional[Dict[str, Any]]: + """Poll until a single switch discovery completes. + + Args: + seed_ip: IP address of the switch being discovered. + max_attempts: Override max attempts (default ``30``). + wait_interval: Override interval in seconds (default ``5``). + + Returns: + Discovery data dict on success, ``None`` on failure or timeout. 
+ """ + attempts = max_attempts or 30 + interval = wait_interval or self.wait_interval + + self.log.info(f"Waiting for discovery of: {seed_ip}") + + for attempt in range(attempts): + status = self._get_discovery_status(seed_ip) + + if ( + status + and status.get("status") in self.MANAGEABLE_STATUSES + ): + self.log.info(f"Discovery completed for {seed_ip}") + return status + + if ( + status + and status.get("status") in self.FAILED_STATUSES + ): + self.log.error( + f"Discovery failed for {seed_ip}: {status}" + ) + return None + + self.log.debug( + f"Discovery attempt {attempt + 1}/{attempts} " + f"for {seed_ip}" + ) + time.sleep(interval) + + self.log.warning(f"Discovery timeout for {seed_ip}") + return None + + # ===================================================================== + # Phase Helpers – System Mode + # ===================================================================== + + def _wait_for_system_mode( + self, serial_numbers: List[str] + ) -> bool: + """Poll until all switches transition from migration mode to normal mode. + + Args: + serial_numbers: Switch serial numbers to monitor. + + Returns: + ``True`` when all switches are in ``normal`` mode, + ``False`` on timeout or API failure. + """ + # Sub-phase A: exit "migration" mode + pending = self._poll_system_mode( + serial_numbers, + target_mode="migration", + expect_match=True, + ) + if pending is None: + return False + + # Sub-phase B: enter "normal" mode + pending = self._poll_system_mode( + serial_numbers, + target_mode="normal", + expect_match=False, + ) + if pending is None: + return False + + self.log.info( + "All switches in normal system mode — " + "proceeding to discovery checks" + ) + return True + + def _poll_system_mode( + self, + serial_numbers: List[str], + target_mode: str, + expect_match: bool, + ) -> Optional[List[str]]: + """Poll until no switches remain in (or outside) ``target_mode``. + + Args: + serial_numbers: Switches to check. + target_mode: System mode string (e.g. 
``"migration"``). + expect_match: When ``True``, waits for switches to leave + ``target_mode``. When ``False``, waits for + switches to enter ``target_mode``. + + Returns: + Empty list on success, ``None`` on timeout or API error. + """ + pending = list(serial_numbers) + label = ( + f"exit '{target_mode}'" + if expect_match + else f"enter '{target_mode}'" + ) + + for attempt in range(1, self.max_attempts + 1): + if not pending: + return pending + + switch_data = self._fetch_switch_data() + if switch_data is None: + return None + + remaining = self._filter_by_system_mode( + pending, switch_data, target_mode, expect_match + ) + + if not remaining: + self.log.info( + f"All switches {label} mode (attempt {attempt})" + ) + return remaining + + pending = remaining + self.log.debug( + f"Attempt {attempt}/{self.max_attempts}: " + f"{len(pending)} switch(es) waiting to " + f"{label}: {pending}" + ) + time.sleep( + self.wait_interval * self._MIGRATION_SLEEP_FACTOR + ) + + self.log.warning( + f"Timeout waiting for switches to {label}: {pending}" + ) + return None + + # ===================================================================== + # Filtering (static, pure-logic helpers) + # ===================================================================== + + @staticmethod + def _filter_by_system_mode( + serial_numbers: List[str], + switch_data: List[Dict[str, Any]], + target_mode: str, + expect_match: bool, + ) -> List[str]: + """Return serial numbers that have NOT yet satisfied the mode check. + + Args: + serial_numbers: Switches to inspect. + switch_data: Raw switch dicts from the GET API. + target_mode: e.g. ``"migration"`` or ``"normal"``. + expect_match: When ``True``, waits for switches to leave + ``target_mode``. When ``False``, waits for + switches to enter ``target_mode``. + + Returns: + Serial numbers still waiting. 
+ """ + switch_index = { + sw.get("serialNumber"): sw for sw in switch_data + } + remaining: List[str] = [] + for sn in serial_numbers: + sw = switch_index.get(sn) + if sw is None: + remaining.append(sn) + continue + mode = ( + sw.get("additionalData", {}) + .get("systemMode", "") + .lower() + ) + # expect_match=True: "still in target_mode" → not done + # expect_match=False: "not yet in target_mode" → not done + still_waiting = ( + (mode == target_mode) + if expect_match + else (mode != target_mode) + ) + if still_waiting: + remaining.append(sn) + return remaining + + @staticmethod + def _filter_by_discovery_status( + serial_numbers: List[str], + switch_data: List[Dict[str, Any]], + target_state: str, + ) -> List[str]: + """Return serial numbers not yet at ``target_state``. + + Args: + serial_numbers: Switches to inspect. + switch_data: Raw switch dicts from the GET API. + target_state: e.g. ``"unreachable"`` or ``"ok"``. + + Returns: + Serial numbers still waiting. + """ + switch_index = { + sw.get("serialNumber"): sw for sw in switch_data + } + remaining: List[str] = [] + for sn in serial_numbers: + sw = switch_index.get(sn) + if sw is None: + remaining.append(sn) + continue + status = ( + sw.get("additionalData", {}) + .get("discoveryStatus", "") + .lower() + ) + if status != target_state: + remaining.append(sn) + return remaining + + # ===================================================================== + # Phase Helpers – Discovery Status + # ===================================================================== + + def _wait_for_discovery_state( + self, + serial_numbers: List[str], + target_state: str, + ) -> bool: + """Poll until all switches reach the given discovery status. + + Triggers rediscovery on each iteration for switches that have not + yet reached the target state. + + Args: + serial_numbers: Switch serial numbers to monitor. + target_state: Expected discovery status, e.g. ``"unreachable"`` + or ``"ok"``. 
+ + Returns: + ``True`` when all switches reach ``target_state``, + ``False`` on timeout. + """ + pending = list(serial_numbers) + + for attempt in range(1, self.max_attempts + 1): + if not pending: + return True + + switch_data = self._fetch_switch_data() + if switch_data is None: + return False + + pending = self._filter_by_discovery_status( + pending, switch_data, target_state + ) + + if not pending: + self.log.info( + f"All switches reached '{target_state}' state " + f"(attempt {attempt})" + ) + return True + + self._trigger_rediscovery(pending) + self.log.debug( + f"Attempt {attempt}/{self.max_attempts}: " + f"{len(pending)} switch(es) not yet " + f"'{target_state}': {pending}" + ) + time.sleep( + self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR + ) + + self.log.warning( + f"Timeout waiting for '{target_state}' state: " + f"{serial_numbers}" + ) + return False + + # ===================================================================== + # API Helpers + # ===================================================================== + + def _fetch_switch_data( + self, + ) -> Optional[List[Dict[str, Any]]]: + """GET current switch data for the fabric. + + Returns: + List of switch dicts, or ``None`` on failure. + """ + try: + response = self.nd.request( + self.ep_switches_get.path, + verb=self.ep_switches_get.verb, + ) + switch_data = response.get("switches", []) + if not switch_data: + self.log.error( + "No switch data returned for fabric" + ) + return None + return switch_data + except Exception as e: + self.log.error(f"Failed to fetch switch data: {e}") + return None + + def _trigger_rediscovery( + self, serial_numbers: List[str] + ) -> None: + """POST a rediscovery request for the given switches. + + Args: + serial_numbers: Switch serial numbers to rediscover. 
+ """ + if not serial_numbers: + return + + payload = {"switchIds": serial_numbers} + self.log.info( + f"Triggering rediscovery for: {serial_numbers}" + ) + try: + self.nd.request( + self.ep_rediscover.path, + verb=self.ep_rediscover.verb, + data=payload, + ) + except Exception as e: + self.log.warning( + f"Failed to trigger rediscovery: {e}" + ) + + def _get_discovery_status( + self, seed_ip: str, + ) -> Optional[Dict[str, Any]]: + """GET discovery status for a single switch by IP. + + Args: + seed_ip: IP address of the switch. + + Returns: + Switch dict from the discovery API, or ``None``. + """ + try: + response = self.nd.request( + self.ep_inventory_discover.path, + verb=self.ep_inventory_discover.verb, + ) + for switch in response.get("switches", []): + if ( + switch.get("ip") == seed_ip + or switch.get("ipaddr") == seed_ip + ): + return switch + return None + except Exception as e: + self.log.debug( + f"Discovery status check failed: {e}" + ) + return None + + def _is_greenfield_debug_enabled(self) -> bool: + """Check whether the fabric has the greenfield debug flag enabled. + + Uses the ``FabricUtils`` instance. Result is cached for the + lifetime of the instance. + + Returns: + ``True`` if the flag is ``"enable"``, ``False`` otherwise. 
+ """ + if self._greenfield_debug_enabled is not None: + return self._greenfield_debug_enabled + + try: + fabric_info = self.fabric_utils.get_fabric_info() + self.log.debug( + f"Fabric info retrieved for greenfield check: " + f"{fabric_info}" + ) + flag = ( + fabric_info + .get("management", {}) + .get("greenfieldDebugFlag", "") + .lower() + ) + self.log.debug( + f"Greenfield debug flag value: '{flag}'" + ) + self._greenfield_debug_enabled = flag == "enable" + except Exception as e: + self.log.debug( + f"Failed to get greenfield debug flag: {e}" + ) + self._greenfield_debug_enabled = False + + return self._greenfield_debug_enabled + + +__all__ = [ + "SwitchWaitUtils", +] diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py new file mode 100644 index 00000000..559f1bd0 --- /dev/null +++ b/plugins/modules/nd_manage_switches.py @@ -0,0 +1,622 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__copyright__ = "Copyright (c) 2026 Cisco and/or its affiliates." +__author__ = "Akshayanat Chengam Saravanan" + +DOCUMENTATION = """ +--- +module: nd_manage_switches +short_description: Manage switches in Cisco Nexus Dashboard (ND). +version_added: "1.0.0" +author: Akshayanat Chengam Saravanan (@achengam) +description: +- Add, delete, override, and query switches in Cisco Nexus Dashboard. +- Supports normal discovery, POAP (bootstrap/preprovision), and RMA operations. +- Uses Pydantic model validation for switch configurations. +- Provides state-based operations with intelligent diff calculation. +options: + fabric: + description: + - Name of the target fabric for switch operations. + type: str + required: yes + state: + description: + - The state of ND and switch(es) after module completion. 
+ - C(merged) and C(query) are the only states supported for POAP. + - C(merged) is the only state supported for RMA. + type: str + default: merged + choices: + - merged + - overridden + - deleted + - query + save: + description: + - Save/Recalculate the configuration of the fabric after inventory is updated. + type: bool + default: true + deploy: + description: + - Deploy the pending configuration of the fabric after inventory is updated. + type: bool + default: true + config: + description: + - List of switch configurations. Optional for state C(deleted). + type: list + elements: dict + suboptions: + seed_ip: + description: + - Seed IP address or DNS name of the switch to manage. + type: str + required: true + auth_proto: + description: + - SNMP authentication protocol to use. + - For POAP and RMA, should be C(MD5). + type: str + default: MD5 + choices: ['MD5', 'SHA', 'MD5_DES', 'MD5_AES', 'SHA_DES', 'SHA_AES'] + user_name: + description: + - Login username for the switch. + - For POAP and RMA, should be C(admin). + type: str + default: admin + password: + description: + - Login password for the switch. + type: str + required: true + role: + description: + - Role to assign to the switch in the fabric. + type: str + default: leaf + choices: + - leaf + - spine + - border + - border_spine + - border_gateway + - border_gateway_spine + - super_spine + - border_super_spine + - border_gateway_super_spine + - access + - aggregation + - edge_router + - core_router + - tor + preserve_config: + description: + - Set to C(false) for greenfield deployment, C(true) for brownfield. + type: bool + default: false + poap: + description: + - POAP (PowerOn Auto Provisioning) configurations for bootstrap/preprovision. + - POAP and DHCP must be enabled in fabric before using. + type: list + elements: dict + suboptions: + discovery_username: + description: + - Username for device discovery during POAP. 
+ type: str + discovery_password: + description: + - Password for device discovery during POAP. + type: str + no_log: true + serial_number: + description: + - Serial number of the physical switch to Bootstrap. + - When used together with C(preprovision_serial), performs a swap operation + that changes the serial number of a pre-provisioned switch and then + imports it via bootstrap. + type: str + preprovision_serial: + description: + - Serial number of switch to Pre-provision. + - When used together with C(serial_number), performs a swap operation + that changes the serial number of this pre-provisioned switch to + C(serial_number) and then imports it via bootstrap. + type: str + model: + description: + - Model of switch to Bootstrap/Pre-provision. + type: str + version: + description: + - Software version of switch. + type: str + hostname: + description: + - Hostname for the switch. + type: str + image_policy: + description: + - Image policy to apply. + type: str + config_data: + description: + - Basic configuration data for the switch during Bootstrap/Pre-provision. + - C(models) and C(gateway) are mandatory. + - C(models) is list of model of modules in switch to Bootstrap/Pre-provision. + - C(gateway) is the gateway IP with mask for the switch. + type: dict + suboptions: + models: + description: + - List of module models in the switch (e.g., N9K-X9364v, N9K-vSUP). + type: list + elements: str + gateway: + description: + - Gateway IP with subnet mask (e.g., 192.168.0.1/24). + type: str + rma: + description: + - RMA an existing switch with a new one. + - Please note that the existing switch should be configured and deployed in maintenance mode. + - Please note that the existing switch being replaced should be shutdown state or out of network. + type: list + elements: dict + suboptions: + discovery_username: + description: + - Username for device discovery during POAP and RMA discovery. 
            type: str
          serial_number:
            description:
              - Serial number of switch to Bootstrap for RMA.
            type: str
            required: true
          old_serial:
            description:
              - Serial number of switch to be replaced by RMA.
            type: str
            required: true
          model:
            description:
              - Model of switch to Bootstrap for RMA.
            type: str
            required: true
          version:
            description:
              - Software version of switch to Bootstrap for RMA.
            type: str
            required: true
          image_policy:
            description:
              - Name of the image policy to be applied on switch during Bootstrap for RMA.
            type: str
          config_data:
            description:
              - Basic config data of switch to Bootstrap for RMA.
              - C(models) and C(gateway) are mandatory.
              - C(models) is list of model of modules in switch to Bootstrap for RMA.
              - C(gateway) is the gateway IP with mask for the switch to Bootstrap for RMA.
            type: dict
            required: true
            suboptions:
              models:
                description:
                  - List of module models in the switch.
                type: list
                elements: str
                required: true
              gateway:
                description:
                  - Gateway IP with subnet mask (e.g., 192.168.0.1/24).
                type: str
                required: true
          discovery_password:
            description:
              - Password for device discovery during RMA.
            type: str
            required: true
extends_documentation_fragment:
- cisco.nd.modules
- cisco.nd.check_mode
notes:
- This module requires NDFC 12.x or higher.
- POAP operations require POAP and DHCP to be enabled in fabric settings.
- RMA operations require the old switch to be in a replaceable state.
"""

EXAMPLES = """
- name: Add a switch to fabric
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    config:
      - seed_ip: 192.168.10.201
        user_name: admin
        password: "{{ switch_password }}"
        role: leaf
        preserve_config: false
    state: merged

- name: Add multiple switches
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    config:
      - seed_ip: 192.168.10.201
        user_name: admin
        password: "{{ switch_password }}"
        role: leaf
      - seed_ip: 192.168.10.202
        user_name: admin
        password: "{{ switch_password }}"
        role: spine
    state: merged

- name: Preprovision a switch via POAP
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    config:
      - seed_ip: 192.168.10.1
        user_name: admin
        password: "{{ switch_password }}"
        poap:
          - preprovision_serial: SAL1234ABCD
            model: N9K-C93180YC-EX
            version: "10.3(1)"
            hostname: leaf-preprov
            config_data:
              models:
                - N9K-X9364v
                - N9K-vSUP
              gateway: 192.168.10.1/24
    state: merged

- name: Bootstrap a switch via POAP
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    config:
      - seed_ip: 192.168.10.1
        user_name: admin
        password: "{{ switch_password }}"
        poap:
          - serial_number: SAL5678EFGH
            model: N9K-C93180YC-EX
            version: "10.3(1)"
            hostname: leaf-bootstrap
            config_data:
              models:
                - N9K-X9364v
                - N9K-vSUP
              gateway: 192.168.10.1/24
    state: merged

- name: Swap serial number on a pre-provisioned switch (POAP swap)
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    config:
      - seed_ip: 192.168.10.1
        user_name: admin
        password: "{{ switch_password }}"
        poap:
          - serial_number: SAL5678EFGH
            preprovision_serial: SAL1234ABCD
    state: merged

- name: RMA - Replace a switch
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    config:
      - seed_ip: 192.168.10.1
        user_name: admin
        password: "{{ switch_password }}"
        rma:
          - old_serial: SAL1234ABCD
            serial_number: SAL9999ZZZZ
            model: N9K-C93180YC-EX
            version: "10.3(1)"
            image_policy: my-image-policy
            discovery_password: "{{ discovery_password }}"
            config_data:
              models:
                - N9K-X9364v
                - N9K-vSUP
              gateway: 192.168.10.1/24
    state: merged

- name: Remove switches from fabric
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    config:
      - seed_ip: 192.168.10.201
      - seed_ip: 192.168.10.202
    state: deleted

- name: Query all switches in fabric
  cisco.nd.nd_manage_switches:
    fabric: my-fabric
    state: query
  register: switches_result
"""

RETURN = """
previous:
  description: The configuration prior to the module execution.
  returned: always
  type: list
  elements: dict
proposed:
  description: The proposed configuration sent to the API.
  returned: always
  type: list
  elements: dict
sent:
  description: The configuration sent to the API.
  returned: when state is not query
  type: list
  elements: dict
current:
  description: The current configuration after module execution.
+ returned: always + type: list + elements: dict +""" + +import logging + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log +from ansible_collections.cisco.nd.plugins.module_utils.nd_switch_resources import NDSwitchResourceModule +from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import ( + NDModule, + NDModuleError, + nd_argument_spec, +) +from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results + + +def main(): + """Main entry point for the nd_manage_switches module.""" + + # Build argument spec + argument_spec = nd_argument_spec() + argument_spec.update( + fabric=dict(type="str", required=True), + config=dict( + type="list", + elements="dict", + options=dict( + seed_ip=dict(type="str", required=True), + auth_proto=dict( + type="str", + default="MD5", + choices=["MD5", "SHA", "MD5_DES", "MD5_AES", "SHA_DES", "SHA_AES"] + ), + user_name=dict(type="str", default="admin"), + password=dict(type="str", no_log=True), + role=dict( + type="str", + default="leaf", + choices=[ + "leaf", "spine", "border", "border_spine", + "border_gateway", "border_gateway_spine", + "super_spine", "border_super_spine", + "border_gateway_super_spine", "access", + "aggregation", "edge_router", "core_router", "tor" + ] + ), + preserve_config=dict(type="bool", default=False), + poap=dict( + type="list", + elements="dict", + options=dict( + discovery_username=dict(type="str"), + discovery_password=dict(type="str", no_log=True), + serial_number=dict(type="str"), + preprovision_serial=dict(type="str"), + model=dict(type="str"), + version=dict(type="str"), + hostname=dict(type="str"), + image_policy=dict(type="str"), + config_data=dict( + type="dict", + options=dict( + models=dict( + type="list", + elements="str", + ), + gateway=dict( + type="str", + ), + ), + ), + ), + ), + rma=dict( + type="list", + elements="dict", + options=dict( + old_serial=dict(type="str", required=True), + 
serial_number=dict(type="str", required=True), + model=dict(type="str", required=True), + version=dict(type="str", required=True), + image_policy=dict(type="str"), + discovery_username=dict(type="str"), + discovery_password=dict(type="str", no_log=True), + config_data=dict( + type="dict", + required=True, + options=dict( + models=dict( + type="list", + elements="str", + required=True, + ), + gateway=dict( + type="str", + required=True, + ), + ), + ), + ), + ), + ), + ), + save=dict(type="bool", default=True), + deploy=dict(type="bool", default=True), + state=dict( + type="str", + default="merged", + choices=["merged", "overridden", "deleted", "query"] + ), + ) + + # Create Ansible module + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ("state", "merged", ["config"]), + ("state", "overridden", ["config"]), + ], + ) + + # Initialize logging + try: + log_config = Log() + log_config.config = "/Users/achengam/Documents/Ansible_Dev/NDBranch/ansible_collections/cisco/nd/ansible_cisco_log_r.json" + log_config.commit() + # Create logger instance for this module + log = logging.getLogger("nd.nd_manage_switches") + except ValueError as error: + module.fail_json(msg=str(error)) + + # Get parameters + state = module.params.get("state") + fabric = module.params.get("fabric") + output_level = module.params.get("output_level") + + # Initialize Results - this collects all operation results + results = Results() + results.state = state + results.check_mode = module.check_mode + results.action = f"manage_switches_{state}" + + try: + log.info(f"Starting nd_manage_switches module: fabric={fabric}, state={state}") + + # Initialize NDModule (uses RestSend infrastructure internally) + nd = NDModule(module) + log.info("NDModule initialized successfully") + + # Create NDSwitchResourceModule + sw_module = NDSwitchResourceModule( + nd=nd, + results=results, + logger=log + ) + log.info(f"NDSwitchResourceModule initialized for fabric: 
{fabric}") + + # Manage state for merged, overridden, deleted, query + log.info(f"Managing state: {state}") + sw_module.manage_state() + + # Exit with results + log.info(f"State management completed successfully. Changed: {results.changed}") + sw_module.exit_json() + + except NDModuleError as error: + # NDModule-specific errors (API failures, authentication issues, etc.) + log.error(f"NDModule error: {error.msg}") + + # Try to get response from RestSend if available + try: + results.response_current = nd.rest_send.response_current + results.result_current = nd.rest_send.result_current + except (AttributeError, ValueError): + # Fallback if RestSend wasn't initialized or no response available + results.response_current = { + "RETURN_CODE": error.status if error.status else -1, + "MESSAGE": error.msg, + "DATA": error.response_payload if error.response_payload else {}, + } + results.result_current = { + "success": False, + "found": False, + } + + results.diff_current = {} + results.register_task_result() + results.build_final_result() + + # Add error details if debug output is requested + if output_level == "debug": + results.final_result["error_details"] = error.to_dict() + + log.error(f"Module failed: {results.final_result}") + module.fail_json(msg=error.msg, **results.final_result) + + except Exception as error: + # Unexpected errors + log.error(f"Unexpected error during module execution: {str(error)}") + log.error(f"Error type: {type(error).__name__}") + + # Build failed result + results.response_current = { + "RETURN_CODE": -1, + "MESSAGE": f"Unexpected error: {str(error)}", + "DATA": {}, + } + results.result_current = { + "success": False, + "found": False, + } + results.diff_current = {} + results.register_task_result() + results.build_final_result() + + if output_level == "debug": + import traceback + results.final_result["traceback"] = traceback.format_exc() + + module.fail_json(msg=str(error), **results.final_result) + + +if __name__ == "__main__": + main() 
From 756bdccf7c0efbad86dfc77886368f94acec490b Mon Sep 17 00:00:00 2001 From: Allen Robel Date: Wed, 11 Mar 2026 08:46:19 -1000 Subject: [PATCH 003/109] [ignore] Add Endpoints framework for ND API v1 (#186) --- .../module_utils/common/pydantic_compat.py | 243 +++++ plugins/module_utils/endpoints/__init__.py | 0 plugins/module_utils/endpoints/base.py | 134 +++ plugins/module_utils/endpoints/mixins.py | 86 ++ .../module_utils/endpoints/query_params.py | 324 +++++++ plugins/module_utils/endpoints/v1/__init__.py | 0 .../endpoints/v1/infra/__init__.py | 0 .../endpoints/v1/infra/base_path.py | 80 ++ .../v1/infra/clusterhealth_config.py | 120 +++ .../v1/infra/clusterhealth_status.py | 139 +++ .../module_utils/endpoints/v1/infra/login.py | 85 ++ .../endpoints/v1/manage/__init__.py | 0 .../endpoints/v1/manage/base_path.py | 80 ++ plugins/module_utils/enums.py | 158 ++++ tests/unit/module_utils/common_utils.py | 75 ++ .../module_utils/endpoints/test_base_model.py | 245 +++++ .../endpoints/test_base_paths_infra.py | 267 ++++++ .../endpoints/test_base_paths_manage.py | 191 ++++ .../endpoints/test_endpoint_mixins.py | 82 ++ ...st_endpoints_api_v1_infra_clusterhealth.py | 485 ++++++++++ .../test_endpoints_api_v1_infra_login.py | 68 ++ .../endpoints/test_query_params.py | 845 ++++++++++++++++++ 22 files changed, 3707 insertions(+) create mode 100644 plugins/module_utils/common/pydantic_compat.py create mode 100644 plugins/module_utils/endpoints/__init__.py create mode 100644 plugins/module_utils/endpoints/base.py create mode 100644 plugins/module_utils/endpoints/mixins.py create mode 100644 plugins/module_utils/endpoints/query_params.py create mode 100644 plugins/module_utils/endpoints/v1/__init__.py create mode 100644 plugins/module_utils/endpoints/v1/infra/__init__.py create mode 100644 plugins/module_utils/endpoints/v1/infra/base_path.py create mode 100644 plugins/module_utils/endpoints/v1/infra/clusterhealth_config.py create mode 100644 
plugins/module_utils/endpoints/v1/infra/clusterhealth_status.py
 create mode 100644 plugins/module_utils/endpoints/v1/infra/login.py
 create mode 100644 plugins/module_utils/endpoints/v1/manage/__init__.py
 create mode 100644 plugins/module_utils/endpoints/v1/manage/base_path.py
 create mode 100644 plugins/module_utils/enums.py
 create mode 100644 tests/unit/module_utils/common_utils.py
 create mode 100644 tests/unit/module_utils/endpoints/test_base_model.py
 create mode 100644 tests/unit/module_utils/endpoints/test_base_paths_infra.py
 create mode 100644 tests/unit/module_utils/endpoints/test_base_paths_manage.py
 create mode 100644 tests/unit/module_utils/endpoints/test_endpoint_mixins.py
 create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_clusterhealth.py
 create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_login.py
 create mode 100644 tests/unit/module_utils/endpoints/test_query_params.py

diff --git a/plugins/module_utils/common/pydantic_compat.py b/plugins/module_utils/common/pydantic_compat.py
new file mode 100644
index 00000000..e1550a18
--- /dev/null
+++ b/plugins/module_utils/common/pydantic_compat.py
@@ -0,0 +1,243 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2026, Allen Robel (@arobel)

# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)

# pylint: disable=too-few-public-methods
"""
# Summary

Pydantic compatibility layer.

This module provides a single location for Pydantic imports with fallback
implementations when Pydantic is not available. This ensures consistent
behavior across all modules and follows the DRY principle.

## Usage

### Importing

Rather than importing directly from pydantic, import from this module:

```python
from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import BaseModel
```

This ensures that Ansible sanity tests will not fail due to missing Pydantic dependencies.
+""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import traceback +from typing import TYPE_CHECKING, Any, Callable, Union + +if TYPE_CHECKING: + # Type checkers always see the real Pydantic types + from pydantic import ( + AfterValidator, + BaseModel, + BeforeValidator, + ConfigDict, + Field, + PydanticExperimentalWarning, + StrictBool, + ValidationError, + field_serializer, + field_validator, + model_validator, + validator, + ) + + HAS_PYDANTIC = True # pylint: disable=invalid-name + PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name +else: + # Runtime: try to import, with fallback + try: + from pydantic import ( + AfterValidator, + BaseModel, + BeforeValidator, + ConfigDict, + Field, + PydanticExperimentalWarning, + StrictBool, + ValidationError, + field_serializer, + field_validator, + model_validator, + validator, + ) + except ImportError: + HAS_PYDANTIC = False # pylint: disable=invalid-name + PYDANTIC_IMPORT_ERROR: Union[str, None] = traceback.format_exc() # pylint: disable=invalid-name + + # Fallback: Minimal BaseModel replacement + class BaseModel: + """Fallback BaseModel when pydantic is not available.""" + + model_config = {"validate_assignment": False, "use_enum_values": False} + + def __init__(self, **kwargs): + """Accept keyword arguments and set them as attributes.""" + for key, value in kwargs.items(): + setattr(self, key, value) + + def model_dump(self, exclude_none: bool = False, exclude_defaults: bool = False) -> dict: # pylint: disable=unused-argument + """Return a dictionary of field names and values. 
+ + Args: + exclude_none: If True, exclude fields with None values + exclude_defaults: Accepted for API compatibility but not implemented in fallback + """ + result = {} + for key, value in self.__dict__.items(): + if exclude_none and value is None: + continue + result[key] = value + return result + + # Fallback: ConfigDict that does nothing + def ConfigDict(**kwargs) -> dict: # pylint: disable=unused-argument,invalid-name + """Pydantic ConfigDict fallback when pydantic is not available.""" + return kwargs + + # Fallback: Field that does nothing + def Field(**kwargs) -> Any: # pylint: disable=unused-argument,invalid-name + """Pydantic Field fallback when pydantic is not available.""" + if "default_factory" in kwargs: + return kwargs["default_factory"]() + return kwargs.get("default") + + # Fallback: field_serializer decorator that does nothing + def field_serializer(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic field_serializer fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + # Fallback: field_validator decorator that does nothing + def field_validator(*args, **kwargs) -> Callable[..., Any]: # pylint: disable=unused-argument,invalid-name + """Pydantic field_validator fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + # Fallback: AfterValidator that returns the function unchanged + def AfterValidator(func): # pylint: disable=invalid-name + """Pydantic AfterValidator fallback when pydantic is not available.""" + return func + + # Fallback: BeforeValidator that returns the function unchanged + def BeforeValidator(func): # pylint: disable=invalid-name + """Pydantic BeforeValidator fallback when pydantic is not available.""" + return func + + # Fallback: PydanticExperimentalWarning + PydanticExperimentalWarning = Warning + + # Fallback: StrictBool + StrictBool = bool + + # Fallback: ValidationError + class ValidationError(Exception): + 
""" + Pydantic ValidationError fallback when pydantic is not available. + """ + + def __init__(self, message="A custom error occurred."): + self.message = message + super().__init__(self.message) + + def __str__(self): + return f"ValidationError: {self.message}" + + # Fallback: model_validator decorator that does nothing + def model_validator(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic model_validator fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + # Fallback: validator decorator that does nothing + def validator(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic validator fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + else: + HAS_PYDANTIC = True # pylint: disable=invalid-name + PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name + + +def require_pydantic(module) -> None: + """ + # Summary + + Call `module.fail_json` if pydantic is not installed. + + Intended to be called once at the top of a module's `main()` function, + immediately after `AnsibleModule` is instantiated, to provide a clear + error message when pydantic is a required dependency. + + ## Example + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import require_pydantic + + def main(): + module = AnsibleModule(argument_spec=...) + require_pydantic(module) + ``` + + ## Raises + + None + + ## Notes + + - Does nothing if pydantic is installed. + - Uses Ansible's `missing_required_lib` to produce a standardized error + message that includes installation instructions. 
+ """ + if not HAS_PYDANTIC: + from ansible.module_utils.basic import missing_required_lib # pylint: disable=import-outside-toplevel + + module.fail_json(msg=missing_required_lib("pydantic"), exception=PYDANTIC_IMPORT_ERROR) + + +__all__ = [ + "AfterValidator", + "BaseModel", + "BeforeValidator", + "ConfigDict", + "Field", + "HAS_PYDANTIC", + "PYDANTIC_IMPORT_ERROR", + "PydanticExperimentalWarning", + "StrictBool", + "ValidationError", + "field_serializer", + "field_validator", + "model_validator", + "require_pydantic", + "validator", +] diff --git a/plugins/module_utils/endpoints/__init__.py b/plugins/module_utils/endpoints/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/plugins/module_utils/endpoints/base.py b/plugins/module_utils/endpoints/base.py new file mode 100644 index 00000000..9da9620e --- /dev/null +++ b/plugins/module_utils/endpoints/base.py @@ -0,0 +1,134 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +Base endpoint model for all ND API endpoints. + +Provides ``NDEndpointBaseModel``, the required base class for every +concrete endpoint definition. It centralizes ``model_config``, +version metadata, and enforces that subclasses define ``path``, +``verb``, and ``class_name``. +""" + +from __future__ import absolute_import, annotations, division, print_function + + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum + + +class NDEndpointBaseModel(BaseModel, ABC): + """ + # Summary + + Abstract base model for all ND API endpoint definitions. + + ## Description + + Centralizes common configuration and version metadata that every endpoint shares. 
Subclasses **must** define `path`, `verb`, and `class_name`. + + ## Fields (inherited by all endpoints) + + - `api_version` — API version string (default `"v1"`) + - `min_controller_version` — minimum ND controller version (default `"3.0.0"`) + + ## Abstract members (must be defined by subclasses) + + - `path` — `@property` returning the endpoint URL path + - `verb` — `@property` returning the `HttpVerbEnum` for this endpoint + - `class_name` — Pydantic field (typically a `Literal` type) identifying the concrete class + + ## Usage + + ```python + class EpInfraLoginPost(NDEndpointBaseModel): + class_name: Literal["EpInfraLoginPost"] = Field( + default="EpInfraLoginPost", + description="Class name for backward compatibility", + ) + + @property + def path(self) -> str: + return BasePath.path("login") + + @property + def verb(self) -> HttpVerbEnum: + return HttpVerbEnum.POST + ``` + """ + + model_config = ConfigDict(validate_assignment=True) + + def __init_subclass__(cls, **kwargs: object) -> None: + """ + # Summary + + Enforce that concrete subclasses define a `class_name` field. + + ## Description + + Fires at class definition time. Skips abstract subclasses (those with remaining abstract methods) and only checks concrete endpoint classes. + + ## Raises + + ### TypeError + + - If a concrete subclass does not define a `class_name` field in its annotations + """ + super().__init_subclass__(**kwargs) + # Compute abstract methods manually because __abstractmethods__ + # is not yet set on cls when __init_subclass__ fires (ABCMeta + # sets it after type.__new__ returns). 
+ abstracts = {name for name, value in vars(cls).items() if getattr(value, "__isabstractmethod__", False)} + for base in cls.__bases__: + for name in getattr(base, "__abstractmethods__", set()): + if getattr(getattr(cls, name, None), "__isabstractmethod__", False): + abstracts.add(name) + if abstracts: + return + if "class_name" not in getattr(cls, "__annotations__", {}): + raise TypeError( + f"{cls.__name__} must define a 'class_name' field. " + f'Example: class_name: Literal["{cls.__name__}"] = ' + f'Field(default="{cls.__name__}", frozen=True, description="...")' + ) + + # Version metadata — shared by all endpoints + api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") + min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") + + @property + @abstractmethod + def path(self) -> str: + """ + # Summary + + Return the endpoint URL path. + + ## Raises + + None + """ + + @property + @abstractmethod + def verb(self) -> HttpVerbEnum: + """ + # Summary + + Return the HTTP verb for this endpoint. + + ## Raises + + None + """ diff --git a/plugins/module_utils/endpoints/mixins.py b/plugins/module_utils/endpoints/mixins.py new file mode 100644 index 00000000..47695611 --- /dev/null +++ b/plugins/module_utils/endpoints/mixins.py @@ -0,0 +1,86 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +Reusable mixin classes for endpoint models. + +This module provides mixin classes that can be composed to add common +fields to endpoint models without duplication. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + + +from typing import Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import BooleanStringEnum +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + Field, +) + + +class ClusterNameMixin(BaseModel): + """Mixin for endpoints that require cluster_name parameter.""" + + cluster_name: Optional[str] = Field(default=None, min_length=1, description="Cluster name") + + +class FabricNameMixin(BaseModel): + """Mixin for endpoints that require fabric_name parameter.""" + + fabric_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="Fabric name") + + +class ForceShowRunMixin(BaseModel): + """Mixin for endpoints that require force_show_run parameter.""" + + force_show_run: BooleanStringEnum = Field(default=BooleanStringEnum.FALSE, description="Force show running config") + + +class HealthCategoryMixin(BaseModel): + """Mixin for endpoints that require health_category parameter.""" + + health_category: Optional[str] = Field(default=None, min_length=1, description="Health category") + + +class InclAllMsdSwitchesMixin(BaseModel): + """Mixin for endpoints that require incl_all_msd_switches parameter.""" + + incl_all_msd_switches: BooleanStringEnum = Field(default=BooleanStringEnum.FALSE, description="Include all MSD switches") + + +class LinkUuidMixin(BaseModel): + """Mixin for endpoints that require link_uuid parameter.""" + + link_uuid: Optional[str] = Field(default=None, min_length=1, description="Link UUID") + + +class LoginIdMixin(BaseModel): + """Mixin for endpoints that require login_id parameter.""" + + login_id: Optional[str] = Field(default=None, min_length=1, description="Login ID") + + +class NetworkNameMixin(BaseModel): + """Mixin for endpoints that require network_name parameter.""" + + network_name: Optional[str] = Field(default=None, min_length=1, max_length=64, 
description="Network name") + + +class NodeNameMixin(BaseModel): + """Mixin for endpoints that require node_name parameter.""" + + node_name: Optional[str] = Field(default=None, min_length=1, description="Node name") + + +class SwitchSerialNumberMixin(BaseModel): + """Mixin for endpoints that require switch_sn parameter.""" + + switch_sn: Optional[str] = Field(default=None, min_length=1, description="Switch serial number") + + +class VrfNameMixin(BaseModel): + """Mixin for endpoints that require vrf_name parameter.""" + + vrf_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="VRF name") diff --git a/plugins/module_utils/endpoints/query_params.py b/plugins/module_utils/endpoints/query_params.py new file mode 100644 index 00000000..5bf8ff08 --- /dev/null +++ b/plugins/module_utils/endpoints/query_params.py @@ -0,0 +1,324 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +Query parameter classes for API endpoints. + +This module provides composable query parameter classes for building +URL query strings. Supports endpoint-specific parameters and Lucene-style +filtering with type safety via Pydantic. +""" + +from __future__ import absolute_import, annotations, division, print_function + + +from enum import Enum +from typing import Optional, Protocol +from urllib.parse import quote + + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + Field, + field_validator, +) + + +class QueryParams(Protocol): + """ + # Summary + + Protocol for Query Parameters + + ## Description + + Structural type for all query parameter types. Any class implementing `to_query_string()` and `is_empty()` satisfies this protocol without explicit + inheritance. 
+ + ## Design + + This allows composition of different query parameter types: + + - Endpoint-specific parameters (e.g., forceShowRun, ticketId) + - Generic Lucene-style filtering (e.g., filter, max, sort) + - Future parameter types can be added without changing existing code + """ + + def to_query_string(self) -> str: + """ + # Summary + + Convert parameters to URL query string format. + + ## Returns + + - Query string (without leading '?') + - Empty string if no parameters are set + + ### Example return value + + ```python + "forceShowRun=true&ticketId=12345" + ``` + """ + # pylint: disable=unnecessary-ellipsis + ... + + def is_empty(self) -> bool: + """ + # Summary + + Check if any parameters are set. + + ## Returns + + - True if no parameters are set + - False if at least one parameter is set + """ + # pylint: disable=unnecessary-ellipsis + ... + + +class EndpointQueryParams(BaseModel): + """ + # Summary + + Endpoint-Specific Query Parameters + + ## Description + + Query parameters specific to a particular endpoint. + These are typed and validated by Pydantic. + + ## Usage + + Subclass this for each endpoint that needs custom query parameters: + + ```python + class ConfigDeployQueryParams(EndpointQueryParams): + force_show_run: bool = False + include_all_msd_switches: bool = False + + def to_query_string(self) -> str: + params = [f"forceShowRun={str(self.force_show_run).lower()}"] + params.append(f"inclAllMSDSwitches={str(self.include_all_msd_switches).lower()}") + return "&".join(params) + ``` + """ + + def to_query_string(self) -> str: + """ + # Summary + + - Default implementation: convert all fields to key=value pairs. + - Override this method for custom formatting. 
+ """ + params = [] + for field_name, field_value in self.model_dump(exclude_none=True).items(): + # Convert snake_case to camelCase for API compatibility + api_key = self._to_camel_case(field_name) + + # Handle different value types + if isinstance(field_value, bool): + api_value = str(field_value).lower() + elif isinstance(field_value, Enum): + # Get the enum's value (e.g., "true" or "false") + api_value = field_value.value + else: + api_value = str(field_value) + + params.append(f"{api_key}={api_value}") + return "&".join(params) + + @staticmethod + def _to_camel_case(snake_str: str) -> str: + """Convert snake_case to camelCase.""" + components = snake_str.split("_") + return components[0] + "".join(x.title() for x in components[1:]) + + def is_empty(self) -> bool: + """Check if any parameters are set.""" + return len(self.model_dump(exclude_none=True, exclude_defaults=True)) == 0 + + +class LuceneQueryParams(BaseModel): + """ + # Summary + + Lucene-Style Query Parameters + + ## Description + + Generic Lucene-style filtering query parameters for ND API. + Supports filtering, pagination, and sorting. 
class LuceneQueryParams(BaseModel):
    """
    # Summary

    Lucene-Style Query Parameters

    ## Description

    Generic Lucene-style filtering query parameters for ND API.
    Supports filtering, pagination, and sorting.

    ## Parameters

    - filter: Lucene filter expression (e.g., "name:MyFabric AND state:deployed")
    - max: Maximum number of results to return
    - offset: Offset for pagination
    - sort: Sort field and direction (e.g., "name:asc", "created:desc")
    - fields: Comma-separated list of fields to return

    ## Usage

    ```python
    lucene = LuceneQueryParams(
        filter="name:Fabric*",
        max=100,
        sort="name:asc"
    )
    query_string = lucene.to_query_string()
    # Returns: "filter=name:Fabric*&max=100&sort=name:asc"
    ```

    ## Lucene Filter Examples

    - Single field: `name:MyFabric`
    - Wildcard: `name:Fabric*`
    - Multiple conditions: `name:MyFabric AND state:deployed`
    - Range: `created:[2024-01-01 TO 2024-12-31]`
    - OR conditions: `state:deployed OR state:pending`
    - NOT conditions: `NOT state:deleted`
    """

    filter: Optional[str] = Field(default=None, description="Lucene filter expression")
    max: Optional[int] = Field(default=None, ge=1, le=10000, description="Maximum results")
    offset: Optional[int] = Field(default=None, ge=0, description="Pagination offset")
    sort: Optional[str] = Field(default=None, description="Sort field and direction (e.g., 'name:asc')")
    fields: Optional[str] = Field(default=None, description="Comma-separated list of fields to return")

    @field_validator("sort")
    @classmethod
    def _validate_sort(cls, value):
        """Validate sort format: field:direction."""
        # A bare field name (no ":") is accepted unchanged; only the
        # direction of a "field:direction" pair is checked.
        if value is None or ":" not in value:
            return value
        parts = value.split(":")
        if len(parts) == 2 and parts[1].lower() not in ("asc", "desc"):
            raise ValueError("Sort direction must be 'asc' or 'desc'")
        return value

    def to_query_string(self, url_encode: bool = True) -> str:
        """
        Convert to URL query string format.

        ### Parameters
        - url_encode: If True, URL-encode parameter values (default: True)

        ### Returns
        - URL query string with encoded values
        """
        pairs = []
        for name, value in self.model_dump(exclude_none=True).items():
            if value is None:
                continue
            text = str(value)
            if url_encode:
                # safe="" also encodes "/" and ":" inside filter values
                text = quote(text, safe="")
            pairs.append(f"{name}={text}")
        return "&".join(pairs)

    def is_empty(self) -> bool:
        """Check if any filter parameters are set."""
        return len(self.model_dump(exclude_none=True)) == 0
class CompositeQueryParams:
    """
    # Summary

    Composite Query Parameters

    ## Description

    Combines several query-parameter groups into one query string, so
    endpoint-specific parameters and Lucene-style filtering can be used
    together.

    ## Design Pattern

    Composition rather than inheritance: each group is configured and
    tested independently, then appended with `add()`.

    ## Usage

    ```python
    composite = CompositeQueryParams()
    composite.add(endpoint_params).add(lucene_params)
    query_string = composite.to_query_string()
    ```
    """

    def __init__(self) -> None:
        self._param_groups: list[QueryParams] = []

    def add(self, params: QueryParams) -> "CompositeQueryParams":
        """
        # Summary

        Append a query parameter group.

        ## Parameters

        - params: Any object satisfying the `QueryParams` protocol

        ## Returns

        - Self (for method chaining)
        """
        self._param_groups.append(params)
        return self

    def to_query_string(self, url_encode: bool = True) -> str:
        """
        # Summary

        Build the complete query string from all non-empty groups.

        ## Parameters

        - url_encode: If True, URL-encode parameter values (default: True)

        ## Returns

        - Complete query string (without leading '?'); empty string if no
          parameters are set
        """
        fragments = []
        for group in self._param_groups:
            if group.is_empty():
                continue
            # Only LuceneQueryParams accepts the url_encode keyword;
            # other groups expose a no-argument to_query_string().
            if isinstance(group, LuceneQueryParams):
                fragments.append(group.to_query_string(url_encode=url_encode))
            else:
                fragments.append(group.to_query_string())
        return "&".join(fragments)

    def is_empty(self) -> bool:
        """Return True when every group (or no group) is empty."""
        return all(group.is_empty() for group in self._param_groups)

    def clear(self) -> None:
        """Remove all parameter groups."""
        self._param_groups.clear()
class BasePath:
    """
    # Summary

    API Endpoints for ND Infra

    ## Description

    Single source of truth for ND Infra API base paths. If the Infra base
    path ever changes, only this class needs updating.

    ## Usage

    ```python
    from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.base_path import BasePath

    path = BasePath.path("aaa", "localUsers")
    # Returns: /api/v1/infra/aaa/localUsers
    ```
    """

    API: Final = "/api/v1/infra"

    @classmethod
    def path(cls, *segments: str) -> str:
        """
        # Summary

        Build an ND Infra API path.

        ## Parameters

        - segments: Path segments appended after /api/v1/infra

        ## Returns

        - Complete ND Infra API path; the bare API root when no segments
          are given

        ## Example

        ```python
        path = BasePath.path("aaa", "localUsers")
        # Returns: /api/v1/infra/aaa/localUsers
        ```
        """
        # Joining the root together with the segments handles the
        # zero-segment case for free (join of a single item is that item).
        return "/".join((cls.API, *segments))
class EpInfraClusterhealthConfigGet(NDEndpointBaseModel):
    """
    # Summary

    ND Infra ClusterHealth Config GET Endpoint

    ## Description

    Endpoint to retrieve cluster health configuration from the ND Infra service.
    Optionally filter by cluster name using the clusterName query parameter.

    ## Path

    - /api/v1/infra/clusterhealth/config
    - /api/v1/infra/clusterhealth/config?clusterName=foo

    ## Verb

    - GET

    ## Usage

    ```python
    # Get cluster health config for all clusters
    request = EpInfraClusterhealthConfigGet()
    path = request.path
    verb = request.verb

    # Get cluster health config for specific cluster
    request = EpInfraClusterhealthConfigGet()
    request.endpoint_params.cluster_name = "foo"
    path = request.path
    verb = request.verb
    # Path will be: /api/v1/infra/clusterhealth/config?clusterName=foo
    ```
    """

    class_name: Literal["EpInfraClusterhealthConfigGet"] = Field(
        default="EpInfraClusterhealthConfigGet", frozen=True, description="Class name for backward compatibility"
    )

    endpoint_params: ClusterHealthConfigEndpointParams = Field(
        default_factory=ClusterHealthConfigEndpointParams, description="Endpoint-specific query parameters"
    )

    @property
    def path(self) -> str:
        """
        # Summary

        Build the endpoint path with optional query string.

        ## Returns

        - Complete endpoint path string, optionally including query parameters
        """
        base_path = BasePath.path("clusterhealth", "config")
        # Append the query string only when at least one parameter is set.
        query_string = self.endpoint_params.to_query_string()
        if query_string:
            return f"{base_path}?{query_string}"
        return base_path

    @property
    def verb(self) -> HttpVerbEnum:
        """Return the HTTP verb for this endpoint."""
        return HttpVerbEnum.GET
class EpInfraClusterhealthStatusGet(NDEndpointBaseModel):
    """
    # Summary

    ND Infra ClusterHealth Status GET Endpoint

    ## Description

    Endpoint to retrieve cluster health status from the ND Infra service.
    Optionally filter by cluster name, health category, and/or node name using query parameters.

    ## Path

    - /api/v1/infra/clusterhealth/status
    - /api/v1/infra/clusterhealth/status?clusterName=foo
    - /api/v1/infra/clusterhealth/status?clusterName=foo&healthCategory=bar&nodeName=baz

    ## Verb

    - GET

    ## Usage

    ```python
    # Get cluster health status for all clusters
    request = EpInfraClusterhealthStatusGet()
    path = request.path
    verb = request.verb

    # Get cluster health status for specific cluster
    request = EpInfraClusterhealthStatusGet()
    request.endpoint_params.cluster_name = "foo"
    path = request.path
    verb = request.verb

    # Get cluster health status with all filters
    request = EpInfraClusterhealthStatusGet()
    request.endpoint_params.cluster_name = "foo"
    request.endpoint_params.health_category = "bar"
    request.endpoint_params.node_name = "baz"
    path = request.path
    verb = request.verb
    # Path will be: /api/v1/infra/clusterhealth/status?clusterName=foo&healthCategory=bar&nodeName=baz
    ```
    """

    class_name: Literal["EpInfraClusterhealthStatusGet"] = Field(
        default="EpInfraClusterhealthStatusGet", frozen=True, description="Class name for backward compatibility"
    )

    endpoint_params: ClusterHealthStatusEndpointParams = Field(
        default_factory=ClusterHealthStatusEndpointParams, description="Endpoint-specific query parameters"
    )

    @property
    def path(self) -> str:
        """
        # Summary

        Build the endpoint path with optional query string.

        ## Returns

        - Complete endpoint path string, optionally including query parameters
        """
        base_path = BasePath.path("clusterhealth", "status")
        # Append the query string only when at least one parameter is set.
        query_string = self.endpoint_params.to_query_string()
        if query_string:
            return f"{base_path}?{query_string}"
        return base_path

    @property
    def verb(self) -> HttpVerbEnum:
        """Return the HTTP verb for this endpoint."""
        return HttpVerbEnum.GET
class EpInfraLoginPost(NDEndpointBaseModel):
    """
    # Summary

    ND Infra Login POST Endpoint

    ## Description

    Endpoint to authenticate against the ND Infra login service.
    Takes no query parameters; credentials are supplied in the request body
    by the caller.

    ## Path

    - /api/v1/infra/login

    ## Verb

    - POST

    ## Usage

    ```python
    request = EpInfraLoginPost()
    path = request.path
    verb = request.verb
    ```

    ## Raises

    None
    """

    class_name: Literal["EpInfraLoginPost"] = Field(default="EpInfraLoginPost", frozen=True, description="Class name for backward compatibility")

    @property
    def path(self) -> str:
        """
        # Summary

        Return the endpoint path.

        ## Returns

        - Complete endpoint path string ("/api/v1/infra/login")

        ## Raises

        None
        """
        return BasePath.path("login")

    @property
    def verb(self) -> HttpVerbEnum:
        """Return the HTTP verb for this endpoint."""
        return HttpVerbEnum.POST
class BasePath:
    """
    # Summary

    API Endpoints for ND Manage

    ## Description

    Single source of truth for ND Manage API base paths. If the Manage base
    path ever changes, only this class needs updating.

    ## Usage

    ```python
    from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import BasePath

    path = BasePath.path("inventory", "switches")
    # Returns: /api/v1/manage/inventory/switches
    ```
    """

    API: Final = "/api/v1/manage"

    @classmethod
    def path(cls, *segments: str) -> str:
        """
        # Summary

        Build an ND Manage API path.

        ## Parameters

        - segments: Path segments appended after /api/v1/manage

        ## Returns

        - Complete ND Manage API path; the bare API root when no segments
          are given

        ## Example

        ```python
        path = BasePath.path("inventory", "switches")
        # Returns: /api/v1/manage/inventory/switches
        ```
        """
        # Joining the root together with the segments handles the
        # zero-segment case for free (join of a single item is that item).
        return "/".join((cls.API, *segments))
+""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +from enum import Enum + + +class BooleanStringEnum(str, Enum): + """ + # Summary + + Enum for boolean string values used in query parameters. + + ## Members + + - TRUE: Represents the string "true". + - FALSE: Represents the string "false". + """ + + TRUE = "true" + FALSE = "false" + + +class HttpVerbEnum(str, Enum): + """ + # Summary + + Enum for HTTP verb values used in endpoints. + + ## Members + + - GET: Represents the HTTP GET method. + - POST: Represents the HTTP POST method. + - PUT: Represents the HTTP PUT method. + - DELETE: Represents the HTTP DELETE method. + - PATCH: Represents the HTTP PATCH method. + """ + + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + PATCH = "PATCH" + + @classmethod + def values(cls) -> list[str]: + """ + # Summary + + Returns a list of all enum values. + + ## Returns + + - A list of string values representing the enum members. + """ + return sorted([member.value for member in cls]) + + +class OperationType(Enum): + """ + # Summary + + Enumeration for operation types. + + Used by Results to determine if changes have occurred based on the operation type. + + - QUERY: Represents a query operation which does not change state. + - CREATE: Represents a create operation which adds new resources. + - UPDATE: Represents an update operation which modifies existing resources. + - DELETE: Represents a delete operation which removes resources. + + # Usage + + ```python + from plugins.module_utils.enums import OperationType + class MyModule: + def __init__(self): + self.operation_type = OperationType.QUERY + ``` + + The above informs the Results class that the current operation is a query, and thus + no changes should be expected. 
class OperationType(Enum):
    """
    # Summary

    Enumeration for operation types.

    Used by Results to decide whether an operation can have changed
    controller state.

    - QUERY: read-only; never changes state.
    - CREATE: adds new resources.
    - UPDATE: modifies existing resources.
    - DELETE: removes resources.

    # Usage

    ```python
    from plugins.module_utils.enums import OperationType
    class MyModule:
        def __init__(self):
            self.operation_type = OperationType.QUERY
    ```

    Results._determine_if_changed() returns False for QUERY operations and
    evaluates CREATE, UPDATE, and DELETE in more detail to determine
    whether any changes occurred.
    """

    QUERY = "query"
    CREATE = "create"
    UPDATE = "update"
    DELETE = "delete"

    def changes_state(self) -> bool:
        """
        # Summary

        Return True if this operation type can change controller state.

        ## Returns

        - `bool`: True if the operation can change state, False otherwise

        ## Examples

        ```python
        OperationType.QUERY.changes_state()   # False
        OperationType.CREATE.changes_state()  # True
        OperationType.DELETE.changes_state()  # True
        ```
        """
        return self in {
            OperationType.CREATE,
            OperationType.UPDATE,
            OperationType.DELETE,
        }

    def is_read_only(self) -> bool:
        """
        # Summary

        Return True if this operation type is read-only.

        ## Returns

        - `bool`: True if the operation is read-only, False otherwise

        ## Examples

        ```python
        OperationType.QUERY.is_read_only()   # True
        OperationType.CREATE.is_read_only()  # False
        ```
        """
        # Enum members are singletons, so identity comparison is exact.
        return self is OperationType.QUERY
+""" + +from __future__ import absolute_import, annotations, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +from contextlib import contextmanager + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.log import Log +from ansible_collections.cisco.nd.tests.unit.module_utils.fixtures.load_fixture import load_fixture +from ansible_collections.cisco.nd.tests.unit.module_utils.response_generator import ResponseGenerator +from ansible_collections.cisco.nd.tests.unit.module_utils.sender_file import Sender as SenderFile + +params = { + "state": "merged", + "config": {"switches": [{"ip_address": "172.22.150.105"}]}, + "check_mode": False, +} + + +# See the following for explanation of why fixtures are explicitely named +# https://pylint.pycqa.org/en/latest/user_guide/messages/warning/redefined-outer-name.html +# @pytest.fixture(name="controller_version") +# def controller_version_fixture(): +# """ +# return ControllerVersion instance. +# """ +# return ControllerVersion() +@pytest.fixture(name="sender_file") +def sender_file_fixture(): + """ + return Send() imported from sender_file.py + """ + + def responses(): + yield {} + + instance = SenderFile() + instance.gen = ResponseGenerator(responses()) + return instance + + +@pytest.fixture(name="log") +def log_fixture(): + """ + return Log instance + """ + return Log() + + +@contextmanager +def does_not_raise(): + """ + A context manager that does not raise an exception. 
+ """ + yield + + +def responses_sender_file(key: str) -> dict[str, str]: + """ + Return data in responses_SenderFile.json + """ + response_file = "responses_SenderFile" + response = load_fixture(response_file).get(key) + print(f"responses_sender_file: {key} : {response}") + return response diff --git a/tests/unit/module_utils/endpoints/test_base_model.py b/tests/unit/module_utils/endpoints/test_base_model.py new file mode 100644 index 00000000..e2db13be --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_base_model.py @@ -0,0 +1,245 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for NDEndpointBaseModel.__init_subclass__() + +Tests the class_name enforcement logic that ensures concrete +subclasses of NDEndpointBaseModel define a class_name field. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +# pylint: disable=unused-import +# pylint: disable=unused-variable +# pylint: disable=missing-function-docstring +# pylint: disable=missing-class-docstring +# pylint: disable=too-few-public-methods + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +import pytest + +if TYPE_CHECKING: + from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: __init_subclass__ — concrete subclass with class_name +# 
def test_base_model_00100():
    """
    # Summary

    Verify a concrete subclass with `class_name` defined is accepted.

    ## Test

    - Concrete subclass defines `class_name`, `path`, and `verb`
    - Class definition succeeds without error
    - Instance can be created and `class_name` is correct

    ## Classes and Methods

    - NDEndpointBaseModel.__init_subclass__()
    """

    class _GoodEndpoint(NDEndpointBaseModel):
        class_name: Literal["_GoodEndpoint"] = Field(default="_GoodEndpoint", frozen=True, description="Class name")

        @property
        def verb(self) -> HttpVerbEnum:
            return HttpVerbEnum.GET

        @property
        def path(self) -> str:
            return "/api/v1/test/good"

    with does_not_raise():
        endpoint = _GoodEndpoint()
    assert endpoint.class_name == "_GoodEndpoint"
def test_base_model_00200():
    """
    # Summary

    Verify a concrete subclass without `class_name` raises `TypeError` at class definition time.

    ## Test

    - Concrete subclass defines `path` and `verb` but omits `class_name`
    - `TypeError` is raised when the class is defined (not when instantiated)

    ## Classes and Methods

    - NDEndpointBaseModel.__init_subclass__()
    """
    with pytest.raises(TypeError, match=r"_BadEndpoint must define a 'class_name' field"):

        class _BadEndpoint(NDEndpointBaseModel):

            @property
            def path(self) -> str:
                return "/api/v1/test/bad"

            @property
            def verb(self) -> HttpVerbEnum:
                return HttpVerbEnum.GET


# =============================================================================
# Test: __init_subclass__ — intermediate abstract subclass skipped
# =============================================================================


def test_base_model_00300():
    """
    # Summary

    Verify an intermediate abstract subclass without `class_name` is allowed.

    ## Test

    - Intermediate ABC adds a new abstract method but does not define `class_name`
    - No `TypeError` is raised at class definition time
    - A concrete subclass of the intermediate ABC with `class_name` can be instantiated

    ## Classes and Methods

    - NDEndpointBaseModel.__init_subclass__()
    """

    class _MiddleABC(NDEndpointBaseModel, ABC):

        @property
        @abstractmethod
        def extra(self) -> str:
            """Return extra info."""

    class _ConcreteFromMiddle(_MiddleABC):
        class_name: Literal["_ConcreteFromMiddle"] = Field(default="_ConcreteFromMiddle", frozen=True, description="Class name")

        @property
        def extra(self) -> str:
            return "extra"

        @property
        def verb(self) -> HttpVerbEnum:
            return HttpVerbEnum.GET

        @property
        def path(self) -> str:
            return "/api/v1/test/middle"

    with does_not_raise():
        endpoint = _ConcreteFromMiddle()
    assert endpoint.class_name == "_ConcreteFromMiddle"
    assert endpoint.extra == "extra"
def test_base_model_00310():
    """
    # Summary

    Verify a concrete subclass of an intermediate ABC without `class_name` raises `TypeError`.

    ## Test

    - Intermediate ABC adds a new abstract method
    - Concrete subclass implements all abstract methods but omits `class_name`
    - `TypeError` is raised at class definition time

    ## Classes and Methods

    - NDEndpointBaseModel.__init_subclass__()
    """

    class _IntermediateABC(NDEndpointBaseModel, ABC):

        @property
        @abstractmethod
        def extra(self) -> str:
            """Return extra info."""

    with pytest.raises(TypeError, match=r"_BadConcreteFromMiddle must define a 'class_name' field"):

        class _BadConcreteFromMiddle(_IntermediateABC):

            @property
            def extra(self) -> str:
                return "extra"

            @property
            def verb(self) -> HttpVerbEnum:
                return HttpVerbEnum.GET

            @property
            def path(self) -> str:
                return "/api/v1/test/bad-middle"
def test_base_model_00400():
    """
    # Summary

    Verify the `TypeError` message includes a helpful example with the class name.

    ## Test

    - Concrete subclass omits `class_name`
    - Error message contains the class name in the `Literal` and `Field` example

    ## Classes and Methods

    - NDEndpointBaseModel.__init_subclass__()
    """
    with pytest.raises(TypeError, match=r'Literal\["_ExampleEndpoint"\]') as exc_info:

        class _ExampleEndpoint(NDEndpointBaseModel):

            @property
            def verb(self) -> HttpVerbEnum:
                return HttpVerbEnum.GET

            @property
            def path(self) -> str:
                return "/api/v1/test/example"

    message = str(exc_info.value)
    assert "_ExampleEndpoint" in message
    assert "frozen=True" in message
BasePath.API + assert result == "/api/v1/infra" + + +# ============================================================================= +# Test: path() method +# ============================================================================= + + +def test_base_paths_infra_00100(): + """ + # Summary + + Verify path() with no segments returns API root + + ## Test + + - path() returns "/api/v1/infra" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path() + assert result == "/api/v1/infra" + + +def test_base_paths_infra_00110(): + """ + # Summary + + Verify path() with single segment + + ## Test + + - path("aaa") returns "/api/v1/infra/aaa" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("aaa") + assert result == "/api/v1/infra/aaa" + + +def test_base_paths_infra_00120(): + """ + # Summary + + Verify path() with multiple segments + + ## Test + + - path("aaa", "localUsers") returns "/api/v1/infra/aaa/localUsers" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("aaa", "localUsers") + assert result == "/api/v1/infra/aaa/localUsers" + + +def test_base_paths_infra_00130(): + """ + # Summary + + Verify path() with three segments + + ## Test + + - path("aaa", "localUsers", "user1") returns correct path + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("aaa", "localUsers", "user1") + assert result == "/api/v1/infra/aaa/localUsers/user1" + + +def test_base_paths_infra_00140(): + """ + # Summary + + Verify path() builds clusterhealth paths + + ## Test + + - path("clusterhealth") returns "/api/v1/infra/clusterhealth" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("clusterhealth") + assert result == "/api/v1/infra/clusterhealth" + + +def test_base_paths_infra_00150(): + """ + # Summary + + Verify path() builds 
clusterhealth config path + + ## Test + + - path("clusterhealth", "config") returns "/api/v1/infra/clusterhealth/config" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("clusterhealth", "config") + assert result == "/api/v1/infra/clusterhealth/config" + + +def test_base_paths_infra_00160(): + """ + # Summary + + Verify path() builds clusterhealth status path + + ## Test + + - path("clusterhealth", "status") returns "/api/v1/infra/clusterhealth/status" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("clusterhealth", "status") + assert result == "/api/v1/infra/clusterhealth/status" + + +def test_base_paths_infra_00170(): + """ + # Summary + + Verify path() builds clusterhealth path with multiple segments + + ## Test + + - path("clusterhealth", "config", "cluster1") returns correct path + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("clusterhealth", "config", "cluster1") + assert result == "/api/v1/infra/clusterhealth/config/cluster1" + + +# ============================================================================= +# Test: Edge cases +# ============================================================================= + + +def test_base_paths_infra_00500(): + """ + # Summary + + Verify empty string segment is handled + + ## Test + + - path("aaa", "", "localUsers") creates path with empty segment + - This creates double slashes (expected behavior) + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("aaa", "", "localUsers") + assert result == "/api/v1/infra/aaa//localUsers" + + +def test_base_paths_infra_00510(): + """ + # Summary + + Verify segments with special characters + + ## Test + + - path("aaa", "user-name_123") handles hyphens and underscores + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = 
BasePath.path("aaa", "user-name_123") + assert result == "/api/v1/infra/aaa/user-name_123" + + +def test_base_paths_infra_00520(): + """ + # Summary + + Verify segments with spaces (no URL encoding) + + ## Test + + - BasePath does not URL-encode spaces + - URL encoding is caller's responsibility + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("my path") + assert result == "/api/v1/infra/my path" diff --git a/tests/unit/module_utils/endpoints/test_base_paths_manage.py b/tests/unit/module_utils/endpoints/test_base_paths_manage.py new file mode 100644 index 00000000..07fdd892 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_base_paths_manage.py @@ -0,0 +1,191 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for base_paths_manage.py + +Tests the BasePath class methods for building ND Manage API paths +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: BasePath.API constant +# ============================================================================= + + +def test_base_paths_manage_00010(): + """ + # Summary + + Verify API constant equals "/api/v1/manage" + + ## Test + + - BasePath.API equals "/api/v1/manage" + + ## Classes and Methods + + - BasePath.API + """ + with does_not_raise(): + result = BasePath.API + assert result == "/api/v1/manage" + + +# 
============================================================================= +# Test: path() method +# ============================================================================= + + +def test_base_paths_manage_00100(): + """ + # Summary + + Verify path() with no segments returns API root + + ## Test + + - path() returns "/api/v1/manage" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path() + assert result == "/api/v1/manage" + + +def test_base_paths_manage_00110(): + """ + # Summary + + Verify path() with single segment + + ## Test + + - path("inventory") returns "/api/v1/manage/inventory" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("inventory") + assert result == "/api/v1/manage/inventory" + + +def test_base_paths_manage_00120(): + """ + # Summary + + Verify path() with multiple segments + + ## Test + + - path("inventory", "switches") returns "/api/v1/manage/inventory/switches" + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("inventory", "switches") + assert result == "/api/v1/manage/inventory/switches" + + +def test_base_paths_manage_00130(): + """ + # Summary + + Verify path() with three segments + + ## Test + + - path("inventory", "switches", "fabric1") returns correct path + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("inventory", "switches", "fabric1") + assert result == "/api/v1/manage/inventory/switches/fabric1" + + +# ============================================================================= +# Test: Edge cases +# ============================================================================= + + +def test_base_paths_manage_00400(): + """ + # Summary + + Verify empty string segment is handled + + ## Test + + - path("inventory", "", "switches") creates path with empty segment + - This creates double slashes (expected behavior) 
+ + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("inventory", "", "switches") + assert result == "/api/v1/manage/inventory//switches" + + +def test_base_paths_manage_00410(): + """ + # Summary + + Verify segments with special characters + + ## Test + + - path("inventory", "fabric-name_123") handles hyphens and underscores + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("inventory", "fabric-name_123") + assert result == "/api/v1/manage/inventory/fabric-name_123" + + +def test_base_paths_manage_00420(): + """ + # Summary + + Verify segments with spaces (no URL encoding) + + ## Test + + - BasePath does not URL-encode spaces + - URL encoding is caller's responsibility + + ## Classes and Methods + + - BasePath.path() + """ + with does_not_raise(): + result = BasePath.path("my path") + assert result == "/api/v1/manage/my path" diff --git a/tests/unit/module_utils/endpoints/test_endpoint_mixins.py b/tests/unit/module_utils/endpoints/test_endpoint_mixins.py new file mode 100644 index 00000000..f122d29a --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoint_mixins.py @@ -0,0 +1,82 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for endpoint_mixins.py + +Tests the mixin classes for endpoint models. +Only tests that verify our configuration constraints or our design +patterns (composition) are included. Simple default/getter/setter tests +are omitted as they test Pydantic itself, not our code. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, + ForceShowRunMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import BooleanStringEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: Validation constraints +# ============================================================================= + + +def test_endpoint_mixins_00220(): + """ + # Summary + + Verify FabricNameMixin validates max length + + ## Test + + - fabric_name rejects strings longer than 64 characters + + ## Classes and Methods + + - FabricNameMixin.fabric_name + """ + long_name = "a" * 65 # 65 characters + with pytest.raises(ValueError): + FabricNameMixin(fabric_name=long_name) + + +# ============================================================================= +# Test: Mixin composition +# ============================================================================= + + +def test_endpoint_mixins_01100(): + """ + # Summary + + Verify mixins can be composed together + + ## Test + + - Multiple mixins can be combined in a single class + + ## Classes and Methods + + - FabricNameMixin + - ForceShowRunMixin + """ + + # Create a composite class using multiple mixins + class CompositeParams(FabricNameMixin, ForceShowRunMixin): + pass + + with does_not_raise(): + instance = CompositeParams(fabric_name="MyFabric", force_show_run=BooleanStringEnum.TRUE) + assert instance.fabric_name == "MyFabric" + assert instance.force_show_run == BooleanStringEnum.TRUE diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_clusterhealth.py 
b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_clusterhealth.py new file mode 100644 index 00000000..e4a3be8e --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_clusterhealth.py @@ -0,0 +1,485 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for clusterhealth_config.py and clusterhealth_status.py + +Tests the ND Infra ClusterHealth endpoint classes +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.clusterhealth_config import ( + ClusterHealthConfigEndpointParams, + EpInfraClusterhealthConfigGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.clusterhealth_status import ( + ClusterHealthStatusEndpointParams, + EpInfraClusterhealthStatusGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: ClusterHealthConfigEndpointParams +# ============================================================================= + + +def test_endpoints_clusterhealth_00010(): + """ + # Summary + + Verify ClusterHealthConfigEndpointParams default values + + ## Test + + - cluster_name defaults to None + + ## Classes and Methods + + - ClusterHealthConfigEndpointParams.__init__() + """ + with does_not_raise(): + params = ClusterHealthConfigEndpointParams() + assert params.cluster_name is None + + +def test_endpoints_clusterhealth_00020(): + """ + # Summary + + Verify ClusterHealthConfigEndpointParams cluster_name can be set + + ## Test + + - cluster_name can
be set to a string value + + ## Classes and Methods + + - ClusterHealthConfigEndpointParams.__init__() + """ + with does_not_raise(): + params = ClusterHealthConfigEndpointParams(cluster_name="my-cluster") + assert params.cluster_name == "my-cluster" + + +def test_endpoints_clusterhealth_00030(): + """ + # Summary + + Verify ClusterHealthConfigEndpointParams generates correct query string + + ## Test + + - to_query_string() returns correct format with cluster_name + + ## Classes and Methods + + - ClusterHealthConfigEndpointParams.to_query_string() + """ + with does_not_raise(): + params = ClusterHealthConfigEndpointParams(cluster_name="test-cluster") + result = params.to_query_string() + assert result == "clusterName=test-cluster" + + +def test_endpoints_clusterhealth_00040(): + """ + # Summary + + Verify ClusterHealthConfigEndpointParams empty query string + + ## Test + + - to_query_string() returns empty string when no params set + + ## Classes and Methods + + - ClusterHealthConfigEndpointParams.to_query_string() + """ + with does_not_raise(): + params = ClusterHealthConfigEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: ClusterHealthStatusEndpointParams +# ============================================================================= + + +def test_endpoints_clusterhealth_00100(): + """ + # Summary + + Verify ClusterHealthStatusEndpointParams default values + + ## Test + + - All parameters default to None + + ## Classes and Methods + + - ClusterHealthStatusEndpointParams.__init__() + """ + with does_not_raise(): + params = ClusterHealthStatusEndpointParams() + assert params.cluster_name is None + assert params.health_category is None + assert params.node_name is None + + +def test_endpoints_clusterhealth_00110(): + """ + # Summary + + Verify ClusterHealthStatusEndpointParams all params can be set + + ## Test + + - All three parameters can be set 
+ + ## Classes and Methods + + - ClusterHealthStatusEndpointParams.__init__() + """ + with does_not_raise(): + params = ClusterHealthStatusEndpointParams(cluster_name="cluster1", health_category="cpu", node_name="node1") + assert params.cluster_name == "cluster1" + assert params.health_category == "cpu" + assert params.node_name == "node1" + + +def test_endpoints_clusterhealth_00120(): + """ + # Summary + + Verify ClusterHealthStatusEndpointParams query string with all params + + ## Test + + - to_query_string() returns correct format with all parameters + + ## Classes and Methods + + - ClusterHealthStatusEndpointParams.to_query_string() + """ + with does_not_raise(): + params = ClusterHealthStatusEndpointParams(cluster_name="foo", health_category="bar", node_name="baz") + result = params.to_query_string() + assert set(result.split("&")) == {"clusterName=foo", "healthCategory=bar", "nodeName=baz"} + + +def test_endpoints_clusterhealth_00130(): + """ + # Summary + + Verify ClusterHealthStatusEndpointParams query string with partial params + + ## Test + + - to_query_string() only includes set parameters + + ## Classes and Methods + + - ClusterHealthStatusEndpointParams.to_query_string() + """ + with does_not_raise(): + params = ClusterHealthStatusEndpointParams(cluster_name="foo", node_name="baz") + result = params.to_query_string() + assert set(result.split("&")) == {"clusterName=foo", "nodeName=baz"} + + +# ============================================================================= +# Test: EpInfraClusterhealthConfigGet +# ============================================================================= + + +def test_endpoints_clusterhealth_00200(): + """ + # Summary + + Verify EpInfraClusterhealthConfigGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpInfraClusterhealthConfigGet.__init__() + - EpInfraClusterhealthConfigGet.verb + - 
EpInfraClusterhealthConfigGet.class_name + """ + with does_not_raise(): + instance = EpInfraClusterhealthConfigGet() + assert instance.class_name == "EpInfraClusterhealthConfigGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_clusterhealth_00210(): + """ + # Summary + + Verify EpInfraClusterhealthConfigGet path without params + + ## Test + + - path returns base path when no query params are set + + ## Classes and Methods + + - EpInfraClusterhealthConfigGet.path + """ + with does_not_raise(): + instance = EpInfraClusterhealthConfigGet() + result = instance.path + assert result == "/api/v1/infra/clusterhealth/config" + + +def test_endpoints_clusterhealth_00220(): + """ + # Summary + + Verify EpInfraClusterhealthConfigGet path with cluster_name + + ## Test + + - path includes query string when cluster_name is set + + ## Classes and Methods + + - EpInfraClusterhealthConfigGet.path + - EpInfraClusterhealthConfigGet.endpoint_params + """ + with does_not_raise(): + instance = EpInfraClusterhealthConfigGet() + instance.endpoint_params.cluster_name = "my-cluster" + result = instance.path + assert result == "/api/v1/infra/clusterhealth/config?clusterName=my-cluster" + + +def test_endpoints_clusterhealth_00230(): + """ + # Summary + + Verify EpInfraClusterhealthConfigGet params at instantiation + + ## Test + + - endpoint_params can be provided during instantiation + + ## Classes and Methods + + - EpInfraClusterhealthConfigGet.__init__() + """ + with does_not_raise(): + params = ClusterHealthConfigEndpointParams(cluster_name="test-cluster") + instance = EpInfraClusterhealthConfigGet(endpoint_params=params) + assert instance.endpoint_params.cluster_name == "test-cluster" + assert instance.path == "/api/v1/infra/clusterhealth/config?clusterName=test-cluster" + + +# ============================================================================= +# Test: EpInfraClusterhealthStatusGet +# 
============================================================================= + + +def test_endpoints_clusterhealth_00300(): + """ + # Summary + + Verify EpInfraClusterhealthStatusGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpInfraClusterhealthStatusGet.__init__() + - EpInfraClusterhealthStatusGet.verb + - EpInfraClusterhealthStatusGet.class_name + """ + with does_not_raise(): + instance = EpInfraClusterhealthStatusGet() + assert instance.class_name == "EpInfraClusterhealthStatusGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_clusterhealth_00310(): + """ + # Summary + + Verify EpInfraClusterhealthStatusGet path without params + + ## Test + + - path returns base path when no query params are set + + ## Classes and Methods + + - EpInfraClusterhealthStatusGet.path + """ + with does_not_raise(): + instance = EpInfraClusterhealthStatusGet() + result = instance.path + assert result == "/api/v1/infra/clusterhealth/status" + + +def test_endpoints_clusterhealth_00320(): + """ + # Summary + + Verify EpInfraClusterhealthStatusGet path with single param + + ## Test + + - path includes query string with cluster_name + + ## Classes and Methods + + - EpInfraClusterhealthStatusGet.path + - EpInfraClusterhealthStatusGet.endpoint_params + """ + with does_not_raise(): + instance = EpInfraClusterhealthStatusGet() + instance.endpoint_params.cluster_name = "foo" + result = instance.path + assert result == "/api/v1/infra/clusterhealth/status?clusterName=foo" + + +def test_endpoints_clusterhealth_00330(): + """ + # Summary + + Verify EpInfraClusterhealthStatusGet path with all params + + ## Test + + - path includes query string with all parameters + + ## Classes and Methods + + - EpInfraClusterhealthStatusGet.path + - EpInfraClusterhealthStatusGet.endpoint_params + """ + with does_not_raise(): + instance = EpInfraClusterhealthStatusGet() + 
instance.endpoint_params.cluster_name = "foo" + instance.endpoint_params.health_category = "bar" + instance.endpoint_params.node_name = "baz" + result = instance.path + base, query = result.split("?", 1) + assert base == "/api/v1/infra/clusterhealth/status" + assert set(query.split("&")) == {"clusterName=foo", "healthCategory=bar", "nodeName=baz"} + + +def test_endpoints_clusterhealth_00340(): + """ + # Summary + + Verify EpInfraClusterhealthStatusGet with partial params + + ## Test + + - path only includes set parameters in query string + + ## Classes and Methods + + - EpInfraClusterhealthStatusGet.path + """ + with does_not_raise(): + instance = EpInfraClusterhealthStatusGet() + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.node_name = "node1" + result = instance.path + base, query = result.split("?", 1) + assert base == "/api/v1/infra/clusterhealth/status" + assert set(query.split("&")) == {"clusterName=cluster1", "nodeName=node1"} + + +# ============================================================================= +# Test: Pydantic validation +# ============================================================================= + + +def test_endpoints_clusterhealth_00400(): + """ + # Summary + + Verify Pydantic validation for empty string + + ## Test + + - Empty string is rejected for cluster_name (min_length=1) + + ## Classes and Methods + + - ClusterHealthConfigEndpointParams.__init__() + """ + with pytest.raises(ValueError): + ClusterHealthConfigEndpointParams(cluster_name="") + + +def test_endpoints_clusterhealth_00410(): + """ + # Summary + + Verify parameters can be modified after instantiation + + ## Test + + - endpoint_params can be changed after object creation + + ## Classes and Methods + + - EpInfraClusterhealthConfigGet.endpoint_params + """ + with does_not_raise(): + instance = EpInfraClusterhealthConfigGet() + assert instance.path == "/api/v1/infra/clusterhealth/config" + + instance.endpoint_params.cluster_name = 
"new-cluster" + assert instance.path == "/api/v1/infra/clusterhealth/config?clusterName=new-cluster" + + +def test_endpoints_clusterhealth_00420(): + """ + # Summary + + Verify snake_case to camelCase conversion + + ## Test + + - cluster_name converts to clusterName in query string + - health_category converts to healthCategory + - node_name converts to nodeName + + ## Classes and Methods + + - ClusterHealthStatusEndpointParams.to_query_string() + """ + with does_not_raise(): + params = ClusterHealthStatusEndpointParams(cluster_name="test", health_category="cpu", node_name="node1") + result = params.to_query_string() + # Verify camelCase conversion + assert "clusterName=" in result + assert "healthCategory=" in result + assert "nodeName=" in result + # Verify no snake_case + assert "cluster_name" not in result + assert "health_category" not in result + assert "node_name" not in result diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_login.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_login.py new file mode 100644 index 00000000..b3b88a1b --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_login.py @@ -0,0 +1,68 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for login.py + +Tests the ND Infra Login endpoint class +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.login import ( + EpInfraLoginPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + + +def
test_endpoints_api_v1_infra_login_00010(): + """ + # Summary + + Verify EpInfraLoginPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpInfraLoginPost.__init__() + - EpInfraLoginPost.class_name + - EpInfraLoginPost.verb + """ + with does_not_raise(): + instance = EpInfraLoginPost() + assert instance.class_name == "EpInfraLoginPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_infra_login_00020(): + """ + # Summary + + Verify EpInfraLoginPost path + + ## Test + + - path returns /api/v1/infra/login + + ## Classes and Methods + + - EpInfraLoginPost.path + """ + with does_not_raise(): + instance = EpInfraLoginPost() + result = instance.path + assert result == "/api/v1/infra/login" diff --git a/tests/unit/module_utils/endpoints/test_query_params.py b/tests/unit/module_utils/endpoints/test_query_params.py new file mode 100644 index 00000000..03500336 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_query_params.py @@ -0,0 +1,845 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for query_params.py + +Tests the query parameter composition classes +""" + +# pylint: disable=protected-access + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + CompositeQueryParams, + EndpointQueryParams, + LuceneQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import BooleanStringEnum +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import Field +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# 
============================================================================= +# Helper test class for EndpointQueryParams +# ============================================================================= + + +class SampleEndpointParams(EndpointQueryParams): + """Sample implementation of EndpointQueryParams for testing.""" + + force_show_run: BooleanStringEnum | None = Field(default=None) + fabric_name: str | None = Field(default=None) + switch_count: int | None = Field(default=None) + + +# ============================================================================= +# Test: EndpointQueryParams +# ============================================================================= + + +def test_query_params_00010(): + """ + # Summary + + Verify EndpointQueryParams default implementation + + ## Test + + - to_query_string() returns empty string when no params set + + ## Classes and Methods + + - EndpointQueryParams.to_query_string() + """ + with does_not_raise(): + params = SampleEndpointParams() + result = params.to_query_string() + # Only non-None, non-default values are included + assert result == "" + + +def test_query_params_00020(): + """ + # Summary + + Verify EndpointQueryParams snake_case to camelCase conversion + + ## Test + + - force_show_run converts to forceShowRun + - fabric_name converts to fabricName + + ## Classes and Methods + + - EndpointQueryParams.to_query_string() + - EndpointQueryParams._to_camel_case() + """ + with does_not_raise(): + params = SampleEndpointParams(force_show_run=BooleanStringEnum.TRUE, fabric_name="Fabric1") + result = params.to_query_string() + assert "forceShowRun=" in result + assert "fabricName=" in result + # Verify no snake_case + assert "force_show_run" not in result + assert "fabric_name" not in result + + +def test_query_params_00030(): + """ + # Summary + + Verify EndpointQueryParams handles Enum values + + ## Test + + - BooleanStringEnum.TRUE converts to "true" + - BooleanStringEnum.FALSE converts to "false" + + ## Classes 
and Methods + + - EndpointQueryParams.to_query_string() + """ + with does_not_raise(): + params = SampleEndpointParams(force_show_run=BooleanStringEnum.TRUE) + result = params.to_query_string() + assert "forceShowRun=true" in result + + +def test_query_params_00040(): + """ + # Summary + + Verify EndpointQueryParams handles integer values + + ## Test + + - Integer values are converted to strings + + ## Classes and Methods + + - EndpointQueryParams.to_query_string() + """ + with does_not_raise(): + params = SampleEndpointParams(switch_count=42) + result = params.to_query_string() + assert result == "switchCount=42" + + +def test_query_params_00050(): + """ + # Summary + + Verify EndpointQueryParams handles string values + + ## Test + + - String values are included as-is + + ## Classes and Methods + + - EndpointQueryParams.to_query_string() + """ + with does_not_raise(): + params = SampleEndpointParams(fabric_name="MyFabric") + result = params.to_query_string() + assert result == "fabricName=MyFabric" + + +def test_query_params_00060(): + """ + # Summary + + Verify EndpointQueryParams handles multiple params + + ## Test + + - Multiple parameters are joined with '&' + + ## Classes and Methods + + - EndpointQueryParams.to_query_string() + """ + with does_not_raise(): + params = SampleEndpointParams(force_show_run=BooleanStringEnum.TRUE, fabric_name="Fabric1", switch_count=10) + result = params.to_query_string() + assert "forceShowRun=true" in result + assert "fabricName=Fabric1" in result + assert "switchCount=10" in result + assert result.count("&") == 2 + + +def test_query_params_00070(): + """ + # Summary + + Verify EndpointQueryParams is_empty() method + + ## Test + + - is_empty() returns True when no params set + - is_empty() returns False when params are set + + ## Classes and Methods + + - EndpointQueryParams.is_empty() + """ + with does_not_raise(): + params = SampleEndpointParams() + assert params.is_empty() is True + + params.fabric_name = "Fabric1" + assert 
params.is_empty() is False + + +def test_query_params_00080(): + """ + # Summary + + Verify EndpointQueryParams _to_camel_case() static method + + ## Test + + - Correctly converts various snake_case strings to camelCase + + ## Classes and Methods + + - EndpointQueryParams._to_camel_case() + """ + with does_not_raise(): + assert EndpointQueryParams._to_camel_case("simple") == "simple" + assert EndpointQueryParams._to_camel_case("snake_case") == "snakeCase" + assert EndpointQueryParams._to_camel_case("long_snake_case_name") == "longSnakeCaseName" + assert EndpointQueryParams._to_camel_case("single") == "single" + + +# ============================================================================= +# Test: LuceneQueryParams +# ============================================================================= + + +def test_query_params_00100(): + """ + # Summary + + Verify LuceneQueryParams default values + + ## Test + + - All parameters default to None + + ## Classes and Methods + + - LuceneQueryParams.__init__() + """ + with does_not_raise(): + params = LuceneQueryParams() + assert params.filter is None + assert params.max is None + assert params.offset is None + assert params.sort is None + assert params.fields is None + + +def test_query_params_00110(): + """ + # Summary + + Verify LuceneQueryParams filter parameter + + ## Test + + - filter can be set to a string value + - to_query_string() includes filter parameter + + ## Classes and Methods + + - LuceneQueryParams.__init__() + - LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(filter="name:MyFabric") + result = params.to_query_string() + assert "filter=" in result + assert "name" in result + assert "MyFabric" in result + + +def test_query_params_00120(): + """ + # Summary + + Verify LuceneQueryParams max parameter + + ## Test + + - max can be set to an integer value + - to_query_string() includes max parameter + + ## Classes and Methods + + - LuceneQueryParams.__init__() + 
- LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(max=100) + result = params.to_query_string() + assert result == "max=100" + + +def test_query_params_00130(): + """ + # Summary + + Verify LuceneQueryParams offset parameter + + ## Test + + - offset can be set to an integer value + - to_query_string() includes offset parameter + + ## Classes and Methods + + - LuceneQueryParams.__init__() + - LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(offset=20) + result = params.to_query_string() + assert result == "offset=20" + + +def test_query_params_00140(): + """ + # Summary + + Verify LuceneQueryParams sort parameter + + ## Test + + - sort can be set to a valid string + - to_query_string() includes sort parameter + + ## Classes and Methods + + - LuceneQueryParams.__init__() + - LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(sort="name:asc") + result = params.to_query_string() + assert "sort=" in result + assert "name" in result + + +def test_query_params_00150(): + """ + # Summary + + Verify LuceneQueryParams fields parameter + + ## Test + + - fields can be set to a comma-separated string + - to_query_string() includes fields parameter + + ## Classes and Methods + + - LuceneQueryParams.__init__() + - LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(fields="name,id,status") + result = params.to_query_string() + assert "fields=" in result + + +def test_query_params_00160(): + """ + # Summary + + Verify LuceneQueryParams URL encoding + + ## Test + + - Special characters in filter are URL-encoded by default + + ## Classes and Methods + + - LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(filter="name:Fabric* AND status:active") + result = params.to_query_string(url_encode=True) + # Check for URL-encoded characters + assert "filter=" in 
result + # Space should be encoded + assert "%20" in result or "+" in result + + +def test_query_params_00170(): + """ + # Summary + + Verify LuceneQueryParams URL encoding can be disabled + + ## Test + + - url_encode=False preserves special characters + + ## Classes and Methods + + - LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(filter="name:Fabric* AND status:active") + result = params.to_query_string(url_encode=False) + assert result == "filter=name:Fabric* AND status:active" + + +def test_query_params_00180(): + """ + # Summary + + Verify LuceneQueryParams is_empty() method + + ## Test + + - is_empty() returns True when no params set + - is_empty() returns False when params are set + + ## Classes and Methods + + - LuceneQueryParams.is_empty() + """ + with does_not_raise(): + params = LuceneQueryParams() + assert params.is_empty() is True + + params.max = 100 + assert params.is_empty() is False + + +def test_query_params_00190(): + """ + # Summary + + Verify LuceneQueryParams multiple parameters + + ## Test + + - Multiple parameters are joined with '&' + - Parameters appear in expected order + + ## Classes and Methods + + - LuceneQueryParams.to_query_string() + """ + with does_not_raise(): + params = LuceneQueryParams(filter="name:*", max=50, offset=10, sort="name:asc") + result = params.to_query_string(url_encode=False) + assert "filter=name:*" in result + assert "max=50" in result + assert "offset=10" in result + assert "sort=name:asc" in result + + +# ============================================================================= +# Test: LuceneQueryParams validation +# ============================================================================= + + +def test_query_params_00200(): + """ + # Summary + + Verify LuceneQueryParams validates max range + + ## Test + + - max must be >= 1 + - max must be <= 10000 + + ## Classes and Methods + + - LuceneQueryParams.__init__() + """ + # Valid values + with 
does_not_raise(): + LuceneQueryParams(max=1) + LuceneQueryParams(max=10000) + LuceneQueryParams(max=500) + + # Invalid values + with pytest.raises(ValueError): + LuceneQueryParams(max=0) + + with pytest.raises(ValueError): + LuceneQueryParams(max=10001) + + +def test_query_params_00210(): + """ + # Summary + + Verify LuceneQueryParams validates offset range + + ## Test + + - offset must be >= 0 + + ## Classes and Methods + + - LuceneQueryParams.__init__() + """ + # Valid values + with does_not_raise(): + LuceneQueryParams(offset=0) + LuceneQueryParams(offset=100) + + # Invalid values + with pytest.raises(ValueError): + LuceneQueryParams(offset=-1) + + +def test_query_params_00220(): + """ + # Summary + + Verify LuceneQueryParams validates sort format + + ## Test + + - sort direction must be 'asc' or 'desc' + - Invalid directions are rejected + + ## Classes and Methods + + - LuceneQueryParams.validate_sort() + """ + # Valid values + with does_not_raise(): + LuceneQueryParams(sort="name:asc") + LuceneQueryParams(sort="name:desc") + LuceneQueryParams(sort="name:ASC") + LuceneQueryParams(sort="name:DESC") + + # Invalid direction + with pytest.raises(ValueError, match="Sort direction must be"): + LuceneQueryParams(sort="name:invalid") + + +def test_query_params_00230(): + """ + # Summary + + Verify LuceneQueryParams allows sort without direction + + ## Test + + - sort can be set without ':' separator + - Validation only applies when ':' is present + + ## Classes and Methods + + - LuceneQueryParams.validate_sort() + """ + with does_not_raise(): + params = LuceneQueryParams(sort="name") + result = params.to_query_string(url_encode=False) + assert result == "sort=name" + + +# ============================================================================= +# Test: CompositeQueryParams +# ============================================================================= + + +def test_query_params_00300(): + """ + # Summary + + Verify CompositeQueryParams basic instantiation + + ## 
Test + + - Instance can be created + - Starts with empty parameter groups + + ## Classes and Methods + + - CompositeQueryParams.__init__() + """ + with does_not_raise(): + composite = CompositeQueryParams() + assert composite.is_empty() is True + + +def test_query_params_00310(): + """ + # Summary + + Verify CompositeQueryParams add() method + + ## Test + + - Can add EndpointQueryParams + - Returns self for method chaining + + ## Classes and Methods + + - CompositeQueryParams.add() + """ + with does_not_raise(): + composite = CompositeQueryParams() + endpoint_params = SampleEndpointParams(fabric_name="Fabric1") + result = composite.add(endpoint_params) + assert result is composite + assert composite.is_empty() is False + + +def test_query_params_00320(): + """ + # Summary + + Verify CompositeQueryParams add() with LuceneQueryParams + + ## Test + + - Can add LuceneQueryParams + - Parameters are combined correctly + + ## Classes and Methods + + - CompositeQueryParams.add() + - CompositeQueryParams.to_query_string() + """ + with does_not_raise(): + composite = CompositeQueryParams() + lucene_params = LuceneQueryParams(max=100) + composite.add(lucene_params) + result = composite.to_query_string() + assert result == "max=100" + + +def test_query_params_00330(): + """ + # Summary + + Verify CompositeQueryParams method chaining + + ## Test + + - Multiple add() calls can be chained + - All parameters are included in final query string + + ## Classes and Methods + + - CompositeQueryParams.add() + - CompositeQueryParams.to_query_string() + """ + with does_not_raise(): + endpoint_params = SampleEndpointParams(fabric_name="Fabric1") + lucene_params = LuceneQueryParams(max=50) + + composite = CompositeQueryParams() + composite.add(endpoint_params).add(lucene_params) + + result = composite.to_query_string() + assert "fabricName=Fabric1" in result + assert "max=50" in result + + +def test_query_params_00340(): + """ + # Summary + + Verify CompositeQueryParams parameter ordering + 
+ ## Test + + - Parameters appear in order they were added + - EndpointQueryParams before LuceneQueryParams + + ## Classes and Methods + + - CompositeQueryParams.to_query_string() + """ + with does_not_raise(): + endpoint_params = SampleEndpointParams(fabric_name="Fabric1") + lucene_params = LuceneQueryParams(max=50) + + composite = CompositeQueryParams() + composite.add(endpoint_params).add(lucene_params) + + result = composite.to_query_string() + + # fabricName should appear before max + fabric_pos = result.index("fabricName") + max_pos = result.index("max") + assert fabric_pos < max_pos + + +def test_query_params_00350(): + """ + # Summary + + Verify CompositeQueryParams is_empty() method + + ## Test + + - is_empty() returns True when all groups are empty + - is_empty() returns False when any group has params + + ## Classes and Methods + + - CompositeQueryParams.is_empty() + """ + with does_not_raise(): + composite = CompositeQueryParams() + assert composite.is_empty() is True + + # Add empty parameter group + empty_params = SampleEndpointParams() + composite.add(empty_params) + assert composite.is_empty() is True + + # Add non-empty parameter group + endpoint_params = SampleEndpointParams(fabric_name="Fabric1") + composite.add(endpoint_params) + assert composite.is_empty() is False + + +def test_query_params_00360(): + """ + # Summary + + Verify CompositeQueryParams clear() method + + ## Test + + - clear() removes all parameter groups + - is_empty() returns True after clear() + + ## Classes and Methods + + - CompositeQueryParams.clear() + - CompositeQueryParams.is_empty() + """ + with does_not_raise(): + composite = CompositeQueryParams() + endpoint_params = SampleEndpointParams(fabric_name="Fabric1") + composite.add(endpoint_params) + + assert composite.is_empty() is False + + composite.clear() + assert composite.is_empty() is True + + +def test_query_params_00370(): + """ + # Summary + + Verify CompositeQueryParams URL encoding propagation + + ## Test + + - 
url_encode parameter is passed to LuceneQueryParams + - EndpointQueryParams not affected (no url_encode parameter) + + ## Classes and Methods + + - CompositeQueryParams.to_query_string() + """ + with does_not_raise(): + endpoint_params = SampleEndpointParams(fabric_name="My Fabric") + lucene_params = LuceneQueryParams(filter="name:Test Value") + + composite = CompositeQueryParams() + composite.add(endpoint_params).add(lucene_params) + + # With URL encoding + result_encoded = composite.to_query_string(url_encode=True) + assert "filter=" in result_encoded + + # Without URL encoding + result_plain = composite.to_query_string(url_encode=False) + assert "filter=name:Test Value" in result_plain + + +def test_query_params_00380(): + """ + # Summary + + Verify CompositeQueryParams with empty groups + + ## Test + + - Empty parameter groups are skipped in query string + - Only non-empty groups contribute to query string + + ## Classes and Methods + + - CompositeQueryParams.to_query_string() + """ + with does_not_raise(): + empty_endpoint = SampleEndpointParams() + non_empty_lucene = LuceneQueryParams(max=100) + + composite = CompositeQueryParams() + composite.add(empty_endpoint).add(non_empty_lucene) + + result = composite.to_query_string() + + # Should only contain the Lucene params + assert result == "max=100" + + +# ============================================================================= +# Test: Integration scenarios +# ============================================================================= + + +def test_query_params_00400(): + """ + # Summary + + Verify complex query string composition + + ## Test + + - Combine multiple EndpointQueryParams with LuceneQueryParams + - All parameters are correctly formatted and encoded + + ## Classes and Methods + + - CompositeQueryParams.add() + - CompositeQueryParams.to_query_string() + """ + with does_not_raise(): + endpoint_params = SampleEndpointParams(force_show_run=BooleanStringEnum.TRUE, fabric_name="Production", 
switch_count=5) + + lucene_params = LuceneQueryParams(filter="status:active AND role:leaf", max=100, offset=0, sort="name:asc") + + composite = CompositeQueryParams() + composite.add(endpoint_params).add(lucene_params) + + result = composite.to_query_string(url_encode=False) + + # Verify all parameters present + assert "forceShowRun=true" in result + assert "fabricName=Production" in result + assert "switchCount=5" in result + assert "filter=status:active AND role:leaf" in result + assert "max=100" in result + assert "offset=0" in result + assert "sort=name:asc" in result From b1fe93995ab9249deca00e7897bf6a90d199e882 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 12 Mar 2026 23:47:21 +0530 Subject: [PATCH 004/109] Add Hostname/DNS support in lieu of IP + Handle Switch Inconsistent State Handling --- .../nd_manage_switches/config_models.py | 28 ++- .../nd_manage_switches/switch_data_models.py | 4 - plugins/module_utils/nd_switch_resources.py | 198 +++++++++++++++--- 3 files changed, 193 insertions(+), 37 deletions(-) diff --git a/plugins/module_utils/models/nd_manage_switches/config_models.py b/plugins/module_utils/models/nd_manage_switches/config_models.py index 1ed4aa72..20023b0e 100644 --- a/plugins/module_utils/models/nd_manage_switches/config_models.py +++ b/plugins/module_utils/models/nd_manage_switches/config_models.py @@ -16,6 +16,7 @@ __metaclass__ = type +import socket from ipaddress import ip_address, ip_interface from pydantic import Field, ValidationInfo, computed_field, field_validator, model_validator from typing import Any, Dict, List, Optional, ClassVar, Literal, Union @@ -526,24 +527,37 @@ def apply_state_defaults(self, info: ValidationInfo) -> Self: @field_validator('seed_ip', mode='before') @classmethod def validate_seed_ip(cls, v: str) -> str: - """Validate seed IP is valid IP address or DNS name.""" + """Resolve seed_ip to an IP address. + + Accepts IPv4, IPv6, or a DNS name / hostname. 
When the input + is not a valid IP address a DNS lookup is performed and the + resolved IPv4 address is returned so that downstream code + always works with a clean IP. + """ if not v or not v.strip(): raise ValueError("seed_ip cannot be empty") v = v.strip() - # Try to validate as IP address first + # Fast path: already a valid IP address try: ip_address(v) return v except ValueError: pass - # If not an IP, assume it's a DNS name - basic validation - if not v.replace('-', '').replace('.', '').replace('_', '').isalnum(): - raise ValueError(f"Invalid seed_ip: {v}. Must be a valid IP address or DNS name") - - return v + # Not an IP — attempt DNS resolution (IPv4 first, then IPv6) + for family in (socket.AF_INET, socket.AF_INET6): + try: + addr_info = socket.getaddrinfo(v, None, family) + if addr_info: + return addr_info[0][4][0] + except socket.gaierror: + continue + + raise ValueError( + f"'{v}' is not a valid IP address and could not be resolved via DNS" + ) @field_validator('poap', 'rma', mode='before') @classmethod diff --git a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py index 5afc6117..08147ce4 100644 --- a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py @@ -382,10 +382,6 @@ class SwitchDataModel(NDBaseModel): default=None, alias="switchRole" ) - mode: Optional[str] = Field( - default=None, - description="Switch mode (Normal, Migration, etc.)" - ) system_up_time: Optional[str] = Field( default=None, alias="systemUpTime", diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 3d9a7f69..625651b7 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -30,6 +30,7 @@ PlatformType, DiscoveryStatus, SystemMode, + ConfigSyncStatus, SwitchDiscoveryModel, SwitchDataModel, 
AddSwitchesRequestModel, @@ -232,6 +233,7 @@ def compute_changes( changes: Dict[str, list] = { "to_add": [], "to_update": [], + "role_change": [], "to_delete": [], "migration_mode": [], "idempotent": [], @@ -257,12 +259,13 @@ def compute_changes( changes["to_add"].append(prop_sw) continue + log.debug(f"Switch {ip} (id={sid}) found in existing with {match_key} match {existing_sw}") log.debug( f"Switch {ip} matched existing by {match_key} " f"(existing_id={existing_sw.switch_id})" ) - if existing_sw.mode == "Migration": + if existing_sw.additional_data.system_mode == SystemMode.MIGRATION: log.info( f"Switch {ip} ({existing_sw.switch_id}) is in Migration mode" ) @@ -284,16 +287,24 @@ def compute_changes( k for k in set(prop_dict) | set(existing_dict) if prop_dict.get(k) != existing_dict.get(k) } - log.info( - f"Switch {ip} has differences — marking to_update. " - f"Changed fields: {diff_keys}" - ) - log.debug( - f"Switch {ip} diff detail — " - f"proposed: { {k: prop_dict.get(k) for k in diff_keys} }, " - f"existing: { {k: existing_dict.get(k) for k in diff_keys} }" - ) - changes["to_update"].append(prop_sw) + if diff_keys == {"switch_role"}: + log.info( + f"Switch {ip} has role-only difference — marking role_change. " + f"proposed: {prop_dict.get('switch_role')}, " + f"existing: {existing_dict.get('switch_role')}" + ) + changes["role_change"].append(prop_sw) + else: + log.info( + f"Switch {ip} has differences — marking to_update. 
" + f"Changed fields: {diff_keys}" + ) + log.debug( + f"Switch {ip} diff detail — " + f"proposed: { {k: prop_dict.get(k) for k in diff_keys} }, " + f"existing: { {k: existing_dict.get(k) for k in diff_keys} }" + ) + changes["to_update"].append(prop_sw) # Switches in existing but not in proposed (for overridden state) proposed_ids = {sw.switch_id for sw in proposed} @@ -309,6 +320,7 @@ def compute_changes( f"Compute changes summary: " f"to_add={len(changes['to_add'])}, " f"to_update={len(changes['to_update'])}, " + f"role_change={len(changes['role_change'])}, " f"to_delete={len(changes['to_delete'])}, " f"migration_mode={len(changes['migration_mode'])}, " f"idempotent={len(changes['idempotent'])}" @@ -2273,21 +2285,16 @@ def _handle_merged_state( return config_by_ip = {sw.seed_ip: sw for sw in proposed_config} + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} - # Phase 1: Log idempotent switches - for sw in diff.get("idempotent", []): - self.log.info( - f"Switch {sw.fabric_management_ip} ({sw.switch_id}) " - f"is idempotent - no changes needed" - ) + # Phase 1: Handle role-change switches + self._merged_handle_role_changes(diff, config_by_ip, existing_by_ip) - # Phase 2: Warn about to_update (merged state doesn't support updates) - if diff.get("to_update"): - ips = [sw.fabric_management_ip for sw in diff["to_update"]] - self.log.warning( - f"Switches require updates which is not supported in merged state. " - f"Use overridden state for updates. 
Affected switches: {ips}" - ) + # Phase 2: Handle idempotent switches that may need config sync + self._merged_handle_idempotent(diff, existing_by_ip) + + # Phase 3: Fail on to_update (merged state doesn't support updates) + self._merged_handle_to_update(diff) switches_to_add = diff.get("to_add", []) migration_switches = diff.get("migration_mode", []) @@ -2317,7 +2324,7 @@ def _handle_merged_state( # Collect (serial_number, SwitchConfigModel) pairs for post-processing switch_actions: List[Tuple[str, SwitchConfigModel]] = [] - # Phase 3: Bulk add new switches to fabric + # Phase 4: Bulk add new switches to fabric if switches_to_add and discovered_data: add_configs = [] for sw in switches_to_add: @@ -2361,7 +2368,8 @@ def _handle_merged_state( switch_actions.append((sn, cfg)) self._log_operation("add", cfg.seed_ip) - # Phase 4: Collect migration switches for post-processing + # Phase 5: Collect migration switches for post-processing + # Migration mode switches get role updates during post-add processing. for mig_sw in migration_switches: cfg = config_by_ip.get(mig_sw.fabric_management_ip) if cfg and mig_sw.switch_id: @@ -2396,6 +2404,140 @@ def _handle_merged_state( self.log.debug("EXIT: _handle_merged_state() - completed") + # ----------------------------------------------------------------- + # Merged-state sub-handlers (modular phases) + # ----------------------------------------------------------------- + + def _merged_handle_role_changes( + self, + diff: Dict[str, List[SwitchDataModel]], + config_by_ip: Dict[str, SwitchConfigModel], + existing_by_ip: Dict[str, SwitchDataModel], + ) -> None: + """Handle role-change switches in merged state. + + Role changes are only allowed when configSyncStatus is notApplicable. + Any other status fails the module. + + Args: + diff: Categorized switch diff output. + config_by_ip: Config lookup by seed IP. + existing_by_ip: Existing switch lookup by management IP. + + Returns: + None. 
+ """ + role_change_switches = diff.get("role_change", []) + if not role_change_switches: + return + + # Validate configSyncStatus for every role-change switch + for sw in role_change_switches: + existing_sw = existing_by_ip.get(sw.fabric_management_ip) + status = ( + existing_sw.additional_data.config_sync_status + if existing_sw and existing_sw.additional_data + else None + ) + if status != ConfigSyncStatus.NOT_APPLICABLE: + self.nd.module.fail_json( + msg=( + f"Role change not possible for switch " + f"{sw.fabric_management_ip} ({sw.switch_id}). " + f"configSyncStatus is " + f"'{status.value if status else 'unknown'}', " + f"expected '{ConfigSyncStatus.NOT_APPLICABLE.value}'." + ) + ) + + # Build (switch_id, SwitchConfigModel) pairs and apply role change + role_actions: List[Tuple[str, SwitchConfigModel]] = [] + for sw in role_change_switches: + cfg = config_by_ip.get(sw.fabric_management_ip) + if cfg and sw.switch_id: + role_actions.append((sw.switch_id, cfg)) + + if role_actions: + self.log.info( + f"Performing role change for {len(role_actions)} switch(es)" + ) + self.fabric_ops.bulk_update_roles(role_actions) + self.fabric_ops.finalize() + + def _merged_handle_idempotent( + self, + diff: Dict[str, List[SwitchDataModel]], + existing_by_ip: Dict[str, SwitchDataModel], + ) -> None: + """Handle idempotent switches that may need config save and deploy. + + If configSyncStatus is anything other than inSync, run config save + and deploy to bring the switch back in sync. + + Args: + diff: Categorized switch diff output. + existing_by_ip: Existing switch lookup by management IP. + + Returns: + None. 
+ """ + idempotent_switches = diff.get("idempotent", []) + if not idempotent_switches: + return + + finalize_needed = False + for sw in idempotent_switches: + existing_sw = existing_by_ip.get(sw.fabric_management_ip) + status = ( + existing_sw.additional_data.config_sync_status + if existing_sw and existing_sw.additional_data + else None + ) + if status != ConfigSyncStatus.IN_SYNC: + self.log.info( + f"Switch {sw.fabric_management_ip} ({sw.switch_id}) is " + f"config-idempotent but configSyncStatus is " + f"'{status.value if status else 'unknown'}' — " + f"will run config save and deploy" + ) + finalize_needed = True + else: + self.log.info( + f"Switch {sw.fabric_management_ip} ({sw.switch_id}) " + f"is idempotent — no changes needed" + ) + + if finalize_needed: + self.fabric_ops.finalize() + + def _merged_handle_to_update( + self, + diff: Dict[str, List[SwitchDataModel]], + ) -> None: + """Fail the module if switches require field-level updates. + + Merged state does not support in-place updates beyond role changes. + Use overridden state which performs delete-and-re-add. + + Args: + diff: Categorized switch diff output. + + Returns: + None. + """ + to_update = diff.get("to_update", []) + if not to_update: + return + + ips = [sw.fabric_management_ip for sw in to_update] + self.nd.module.fail_json( + msg=( + f"Switches require updates that are not supported in merged state. " + f"Use 'overridden' state for in-place updates. 
" + f"Affected switches: {ips}" + ) + ) + def _handle_overridden_state( self, diff: Dict[str, List[SwitchDataModel]], @@ -2419,6 +2561,10 @@ def _handle_overridden_state( self.log.warning("No configurations provided for overridden state") return + # Merge role_change into to_update — overridden uses delete-and-re-add + diff["to_update"].extend(diff.get("role_change", [])) + diff["role_change"] = [] + # Check mode — preview only if self.nd.module.check_mode: n_delete = len(diff.get("to_delete", [])) From 2a135aeeda1581dd3f58637bcd33cc92f8897110 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 13 Mar 2026 00:55:20 +0530 Subject: [PATCH 005/109] Update Endpoints Inheritance, Directory Structure and Imports --- .../nd_manage_switches/credentials.py} | 4 +- .../nd_manage_switches/fabric_bootstrap.py} | 31 ++-- .../nd_manage_switches/fabric_config.py} | 56 ++++---- .../nd_manage_switches/fabric_discovery.py} | 28 +++- .../fabric_switch_actions.py} | 132 +++++++++--------- .../nd_manage_switches/fabric_switches.py} | 6 +- .../nd_manage_switches/config_models.py | 17 +-- .../nd_manage_switches/bootstrap_utils.py | 2 +- .../utils/nd_manage_switches/fabric_utils.py | 2 +- .../nd_manage_switches/switch_wait_utils.py | 6 +- 10 files changed, 155 insertions(+), 129 deletions(-) rename plugins/module_utils/endpoints/v1/{nd_manage_switches/manage_credentials.py => manage/nd_manage_switches/credentials.py} (97%) rename plugins/module_utils/endpoints/v1/{nd_manage_switches/manage_fabric_bootstrap.py => manage/nd_manage_switches/fabric_bootstrap.py} (87%) rename plugins/module_utils/endpoints/v1/{nd_manage_switches/manage_fabric_config.py => manage/nd_manage_switches/fabric_config.py} (86%) rename plugins/module_utils/endpoints/v1/{nd_manage_switches/manage_fabric_discovery.py => manage/nd_manage_switches/fabric_discovery.py} (80%) rename plugins/module_utils/endpoints/v1/{nd_manage_switches/manage_fabric_switch_actions.py => manage/nd_manage_switches/fabric_switch_actions.py} (88%) 
rename plugins/module_utils/endpoints/v1/{nd_manage_switches/manage_fabric_switches.py => manage/nd_manage_switches/fabric_switches.py} (97%) diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_credentials.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py similarity index 97% rename from plugins/module_utils/endpoints/v1/nd_manage_switches/manage_credentials.py rename to plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py index 9007be8d..242948a7 100644 --- a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_credentials.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py @@ -29,7 +29,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( @@ -77,7 +77,7 @@ class _V1ManageCredentialsSwitchesBase(BaseModel): @property def _base_path(self) -> str: """Build the base endpoint path.""" - return BasePath.nd_manage("credentials", "switches") + return BasePath.path("credentials", "switches") class V1ManageCredentialsSwitchesPost(_V1ManageCredentialsSwitchesBase): diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py similarity index 87% rename from plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_bootstrap.py rename to plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py index 48212482..d2e07828 100644 --- a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_bootstrap.py +++ 
b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py @@ -29,7 +29,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( @@ -68,7 +68,25 @@ class FabricBootstrapEndpointParams(EndpointQueryParams): filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") -class V1ManageFabricBootstrapGet(FabricNameMixin, BaseModel): +class _V1ManageFabricBootstrapBase(FabricNameMixin, BaseModel): + """ + Base class for Fabric Bootstrap endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/bootstrap endpoint. + """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "bootstrap") + + +class V1ManageFabricBootstrapGet(_V1ManageFabricBootstrapBase): """ # Summary @@ -113,8 +131,6 @@ class V1ManageFabricBootstrapGet(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -137,13 +153,10 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - base_path = BasePath.nd_manage("fabrics", self.fabric_name, "bootstrap") 
query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{self._base_path}?{query_string}" + return self._base_path @property def verb(self) -> HttpVerbEnum: diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_config.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py similarity index 86% rename from plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_config.py rename to plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py index bb037e1e..078afc6c 100644 --- a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_config.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py @@ -32,7 +32,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( @@ -69,7 +69,25 @@ class FabricConfigDeployEndpointParams(EndpointQueryParams): incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") -class V1ManageFabricConfigSavePost(FabricNameMixin, BaseModel): +class _V1ManageFabricConfigBase(FabricNameMixin, BaseModel): + """ + Base class for Fabric Config endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName} endpoint family. 
+ """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name) + + +class V1ManageFabricConfigSavePost(_V1ManageFabricConfigBase): """ # Summary @@ -97,8 +115,6 @@ class V1ManageFabricConfigSavePost(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -110,9 +126,7 @@ class V1ManageFabricConfigSavePost(FabricNameMixin, BaseModel): @property def path(self) -> str: """Build the endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - return BasePath.nd_manage("fabrics", self.fabric_name, "actions", "configSave") + return f"{self._base_path}/actions/configSave" @property def verb(self) -> HttpVerbEnum: @@ -120,7 +134,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricConfigDeployPost(FabricNameMixin, BaseModel): +class V1ManageFabricConfigDeployPost(_V1ManageFabricConfigBase): """ # Summary @@ -163,8 +177,6 @@ class V1ManageFabricConfigDeployPost(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -187,13 +199,11 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - base_path = BasePath.nd_manage("fabrics", self.fabric_name, "actions", 
"configDeploy") + base = f"{self._base_path}/actions/configDeploy" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: @@ -201,7 +211,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricGet(FabricNameMixin, BaseModel): +class V1ManageFabricGet(_V1ManageFabricConfigBase): """ # Summary @@ -229,8 +239,6 @@ class V1ManageFabricGet(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -242,9 +250,7 @@ class V1ManageFabricGet(FabricNameMixin, BaseModel): @property def path(self) -> str: """Build the endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - return BasePath.nd_manage("fabrics", self.fabric_name) + return self._base_path @property def verb(self) -> HttpVerbEnum: @@ -252,7 +258,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.GET -class V1ManageFabricInventoryDiscoverGet(FabricNameMixin, BaseModel): +class V1ManageFabricInventoryDiscoverGet(_V1ManageFabricConfigBase): """ # Summary @@ -280,8 +286,6 @@ class V1ManageFabricInventoryDiscoverGet(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -293,9 +297,7 @@ class V1ManageFabricInventoryDiscoverGet(FabricNameMixin, BaseModel): @property def path(self) -> str: """Build the endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must 
be set before accessing path") - return BasePath.nd_manage("fabrics", self.fabric_name, "inventory", "discover") + return f"{self._base_path}/inventory/discover" @property def verb(self) -> HttpVerbEnum: diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_discovery.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py similarity index 80% rename from plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_discovery.py rename to plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py index d7d2e1f2..928b4b67 100644 --- a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_discovery.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py @@ -26,7 +26,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( FabricNameMixin, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( @@ -39,7 +39,25 @@ COMMON_CONFIG = ConfigDict(validate_assignment=True) -class V1ManageFabricShallowDiscoveryPost(FabricNameMixin, BaseModel): +class _V1ManageFabricDiscoveryBase(FabricNameMixin, BaseModel): + """ + Base class for Fabric Discovery endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/actions/shallowDiscovery endpoint. 
+ """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "actions", "shallowDiscovery") + + +class V1ManageFabricShallowDiscoveryPost(_V1ManageFabricDiscoveryBase): """ # Summary @@ -67,8 +85,6 @@ class V1ManageFabricShallowDiscoveryPost(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -80,9 +96,7 @@ class V1ManageFabricShallowDiscoveryPost(FabricNameMixin, BaseModel): @property def path(self) -> str: """Build the endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - return BasePath.nd_manage("fabrics", self.fabric_name, "actions", "shallowDiscovery") + return self._base_path @property def verb(self) -> HttpVerbEnum: diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switch_actions.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py similarity index 88% rename from plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switch_actions.py rename to plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py index 73aa93ea..6b90f160 100644 --- a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switch_actions.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py @@ -35,7 +35,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( +from 
ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( @@ -128,7 +128,25 @@ class SwitchActionsImportEndpointParams(EndpointQueryParams): # ============================================================================ -class V1ManageFabricSwitchActionsRemovePost(FabricNameMixin, BaseModel): +class _V1ManageFabricSwitchActionsBase(FabricNameMixin, BaseModel): + """ + Base class for Fabric Switch Actions endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switchActions endpoint. + """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "switchActions") + + +class V1ManageFabricSwitchActionsRemovePost(_V1ManageFabricSwitchActionsBase): """ # Summary @@ -172,8 +190,6 @@ class V1ManageFabricSwitchActionsRemovePost(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -196,13 +212,11 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "remove") + base = f"{self._base_path}/remove" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: @@ -210,7 
+224,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricSwitchActionsChangeRolesPost(FabricNameMixin, BaseModel): +class V1ManageFabricSwitchActionsChangeRolesPost(_V1ManageFabricSwitchActionsBase): """ # Summary @@ -252,8 +266,6 @@ class V1ManageFabricSwitchActionsChangeRolesPost(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -277,13 +289,11 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "changeRoles") + base = f"{self._base_path}/changeRoles" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: @@ -291,7 +301,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricSwitchActionsImportBootstrapPost(FabricNameMixin, BaseModel): +class V1ManageFabricSwitchActionsImportBootstrapPost(_V1ManageFabricSwitchActionsBase): """ # Summary @@ -335,8 +345,6 @@ class V1ManageFabricSwitchActionsImportBootstrapPost(FabricNameMixin, BaseModel) ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -359,13 +367,11 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise 
ValueError("fabric_name must be set before accessing path") - base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "importBootstrap") + base = f"{self._base_path}/importBootstrap" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: @@ -378,7 +384,7 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class V1ManageFabricSwitchActionsPreProvisionPost(FabricNameMixin, BaseModel): +class V1ManageFabricSwitchActionsPreProvisionPost(_V1ManageFabricSwitchActionsBase): """ # Summary @@ -425,8 +431,6 @@ class V1ManageFabricSwitchActionsPreProvisionPost(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -450,13 +454,11 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "preProvision") + base = f"{self._base_path}/preProvision" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: @@ -469,7 +471,27 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class V1ManageFabricSwitchProvisionRMAPost(FabricNameMixin, SwitchSerialNumberMixin, BaseModel): +class _V1ManageFabricSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, 
BaseModel): + """ + Base class for per-switch action endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions endpoint. + """ + + model_config = COMMON_CONFIG + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + if self.switch_sn is None: + raise ValueError("switch_sn must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn, "actions") + + +class V1ManageFabricSwitchProvisionRMAPost(_V1ManageFabricSwitchActionsPerSwitchBase): """ # Summary @@ -513,8 +535,6 @@ class V1ManageFabricSwitchProvisionRMAPost(FabricNameMixin, SwitchSerialNumberMi ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -537,17 +557,11 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - if self.switch_sn is None: - raise ValueError("switch_sn must be set before accessing path") - base_path = BasePath.nd_manage( - "fabrics", self.fabric_name, "switches", self.switch_sn, "actions", "provisionRMA" - ) + base = f"{self._base_path}/provisionRMA" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: @@ -582,7 +596,7 @@ class SwitchActionsClusterEndpointParams(EndpointQueryParams): cluster_name: Optional[str] = Field(default=None, min_length=1, description="Target cluster name") -class 
V1ManageFabricSwitchChangeSerialNumberPost(FabricNameMixin, SwitchSerialNumberMixin, BaseModel): +class V1ManageFabricSwitchChangeSerialNumberPost(_V1ManageFabricSwitchActionsPerSwitchBase): """ # Summary @@ -626,8 +640,6 @@ class V1ManageFabricSwitchChangeSerialNumberPost(FabricNameMixin, SwitchSerialNu ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -650,17 +662,11 @@ def path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - if self.switch_sn is None: - raise ValueError("switch_sn must be set before accessing path") - base_path = BasePath.nd_manage( - "fabrics", self.fabric_name, "switches", self.switch_sn, "actions", "changeSwitchSerialNumber" - ) + base = f"{self._base_path}/changeSwitchSerialNumber" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: @@ -673,7 +679,7 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class V1ManageFabricSwitchActionsRediscoverPost(FabricNameMixin, BaseModel): +class V1ManageFabricSwitchActionsRediscoverPost(_V1ManageFabricSwitchActionsBase): """ # Summary @@ -715,8 +721,6 @@ class V1ManageFabricSwitchActionsRediscoverPost(FabricNameMixin, BaseModel): ``` """ - model_config = COMMON_CONFIG - # Version metadata api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") @@ -740,13 +744,11 @@ def 
path(self) -> str: - Complete endpoint path string, optionally including query parameters """ - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - base_path = BasePath.nd_manage("fabrics", self.fabric_name, "switchActions", "rediscover") + base = f"{self._base_path}/rediscover" query_string = self.endpoint_params.to_query_string() if query_string: - return f"{base_path}?{query_string}" - return base_path + return f"{base}?{query_string}" + return base @property def verb(self) -> HttpVerbEnum: diff --git a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switches.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py similarity index 97% rename from plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switches.py rename to plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py index b771fb1d..9594a64c 100644 --- a/plugins/module_utils/endpoints/v1/nd_manage_switches/manage_fabric_switches.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py @@ -31,7 +31,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.base_paths_manage import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( @@ -111,7 +111,7 @@ def _base_path(self) -> str: """Build the base endpoint path.""" if self.fabric_name is None: raise ValueError("fabric_name must be set before accessing path") - return BasePath.nd_manage("fabrics", self.fabric_name, "switches") + return BasePath.path("fabrics", self.fabric_name, "switches") class V1ManageFabricSwitchesGet(_V1ManageFabricSwitchesBase): @@ -287,4 +287,4 @@ def _base_path(self) -> str: raise ValueError("fabric_name must 
be set before accessing path") if self.switch_sn is None: raise ValueError("switch_sn must be set before accessing path") - return BasePath.nd_manage("fabrics", self.fabric_name, "switches", self.switch_sn) + return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn) diff --git a/plugins/module_utils/models/nd_manage_switches/config_models.py b/plugins/module_utils/models/nd_manage_switches/config_models.py index 20023b0e..4dca8c6b 100644 --- a/plugins/module_utils/models/nd_manage_switches/config_models.py +++ b/plugins/module_utils/models/nd_manage_switches/config_models.py @@ -17,7 +17,7 @@ __metaclass__ = type import socket -from ipaddress import ip_address, ip_interface +from ipaddress import ip_address from pydantic import Field, ValidationInfo, computed_field, field_validator, model_validator from typing import Any, Dict, List, Optional, ClassVar, Literal, Union from typing_extensions import Self @@ -58,11 +58,7 @@ def validate_gateway(cls, v: str) -> str: """Validate gateway is a valid CIDR.""" if not v or not v.strip(): raise ValueError("gateway cannot be empty") - try: - ip_interface(v.strip()) - except ValueError as e: - raise ValueError(f"Invalid gateway IP address with mask: {v}") from e - return v.strip() + return SwitchValidators.validate_cidr(v) class POAPConfigModel(NDNestedModel): @@ -209,9 +205,7 @@ def validate_discovery_credentials_pair(self) -> Self: @classmethod def validate_serial_numbers(cls, v: Optional[str]) -> Optional[str]: """Validate serial numbers are not empty strings.""" - if v is not None and not v.strip(): - raise ValueError("Serial number cannot be empty") - return v + return SwitchValidators.validate_serial_number(v) class RMAConfigModel(NDNestedModel): @@ -280,9 +274,10 @@ class RMAConfigModel(NDNestedModel): @classmethod def validate_serial_numbers(cls, v: str) -> str: """Validate serial numbers are not empty.""" - if not v or not v.strip(): + result = SwitchValidators.validate_serial_number(v) + if result 
is None: raise ValueError("Serial number cannot be empty") - return v.strip() + return result @model_validator(mode='after') def validate_discovery_credentials_pair(self) -> Self: diff --git a/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py b/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py index 1356428a..89904696 100644 --- a/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py +++ b/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py @@ -14,7 +14,7 @@ import logging from typing import Any, Dict, List, Optional -from ...endpoints.v1.nd_manage_switches.manage_fabric_bootstrap import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_bootstrap import ( V1ManageFabricBootstrapGet, ) diff --git a/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py b/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py index e1d6e4a7..1fb8da7e 100644 --- a/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py +++ b/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py @@ -14,7 +14,7 @@ import time from typing import Any, Dict, Optional -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.nd_manage_switches.manage_fabric_config import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_config import ( V1ManageFabricConfigDeployPost, V1ManageFabricConfigSavePost, V1ManageFabricGet, diff --git a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py b/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py index 5f9350ae..31665ee7 100644 --- a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py +++ b/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py @@ -14,13 +14,13 @@ import time from typing import Any, Dict, List, Optional -from 
ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.nd_manage_switches.manage_fabric_config import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_config import ( V1ManageFabricInventoryDiscoverGet, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.nd_manage_switches.manage_fabric_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switches import ( V1ManageFabricSwitchesGet, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.nd_manage_switches.manage_fabric_switch_actions import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switch_actions import ( V1ManageFabricSwitchActionsRediscoverPost, ) From 081afe06859ea5ba55e083cb3f22db8c035f3916 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 13 Mar 2026 00:58:02 +0530 Subject: [PATCH 006/109] Update Module Imports --- plugins/module_utils/nd_switch_resources.py | 22 ++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 625651b7..3894de7e 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -21,10 +21,10 @@ from pydantic import ValidationError -from .nd_v2 import NDModule -from .enums import OperationType -from .rest.results import Results -from .models.nd_manage_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule +from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType +from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches import ( SwitchRole, SnmpV3AuthProtocol, PlatformType, @@ -46,7 +46,7 @@ POAPConfigModel, RMAConfigModel, ) -from 
.utils.nd_manage_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.utils.nd_manage_switches import ( FabricUtils, SwitchWaitUtils, SwitchOperationError, @@ -57,12 +57,14 @@ build_bootstrap_index, build_poap_data_block, ) -from .endpoints.v1.nd_manage_switches.manage_fabric_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switches import ( V1ManageFabricSwitchesGet, V1ManageFabricSwitchesPost, ) -from .endpoints.v1.nd_manage_switches.manage_fabric_discovery import V1ManageFabricShallowDiscoveryPost -from .endpoints.v1.nd_manage_switches.manage_fabric_switch_actions import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_discovery import ( + V1ManageFabricShallowDiscoveryPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switch_actions import ( V1ManageFabricSwitchProvisionRMAPost, V1ManageFabricSwitchActionsImportBootstrapPost, V1ManageFabricSwitchActionsPreProvisionPost, @@ -70,7 +72,9 @@ V1ManageFabricSwitchActionsChangeRolesPost, V1ManageFabricSwitchChangeSerialNumberPost, ) -from .endpoints.v1.nd_manage_switches.manage_credentials import V1ManageCredentialsSwitchesPost +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.credentials import ( + V1ManageCredentialsSwitchesPost, +) # ========================================================================= From f5b16557001ed0b7f35ef09a2e9b610a6491a07b Mon Sep 17 00:00:00 2001 From: Allen Robel Date: Thu, 12 Mar 2026 13:30:35 -1000 Subject: [PATCH 007/109] [ignore] Add RestSend framework, enums, and shared unit test infrastructure (#185) --- plugins/module_utils/__init__.py | 0 plugins/module_utils/common/exceptions.py | 146 ++ plugins/module_utils/nd_v2.py | 317 ++++ plugins/module_utils/rest/__init__.py | 0 .../module_utils/rest/protocols/__init__.py | 0 
.../rest/protocols/response_handler.py | 138 ++ .../rest/protocols/response_validation.py | 197 +++ plugins/module_utils/rest/protocols/sender.py | 103 ++ .../module_utils/rest/response_handler_nd.py | 401 +++++ .../rest/response_strategies/__init__.py | 0 .../response_strategies/nd_v1_strategy.py | 267 +++ plugins/module_utils/rest/rest_send.py | 819 +++++++++ plugins/module_utils/rest/results.py | 1178 +++++++++++++ plugins/module_utils/rest/sender_nd.py | 322 ++++ tests/sanity/requirements.txt | 6 +- .../fixtures/fixture_data/test_rest_send.json | 244 +++ .../module_utils/fixtures/load_fixture.py | 46 + .../unit/module_utils/mock_ansible_module.py | 95 + tests/unit/module_utils/response_generator.py | 60 + tests/unit/module_utils/sender_file.py | 293 ++++ .../module_utils/test_response_handler_nd.py | 1496 ++++++++++++++++ tests/unit/module_utils/test_rest_send.py | 1551 +++++++++++++++++ tests/unit/module_utils/test_results.py | 362 ++++ tests/unit/module_utils/test_sender_nd.py | 906 ++++++++++ 24 files changed, 8944 insertions(+), 3 deletions(-) create mode 100644 plugins/module_utils/__init__.py create mode 100644 plugins/module_utils/common/exceptions.py create mode 100644 plugins/module_utils/nd_v2.py create mode 100644 plugins/module_utils/rest/__init__.py create mode 100644 plugins/module_utils/rest/protocols/__init__.py create mode 100644 plugins/module_utils/rest/protocols/response_handler.py create mode 100644 plugins/module_utils/rest/protocols/response_validation.py create mode 100644 plugins/module_utils/rest/protocols/sender.py create mode 100644 plugins/module_utils/rest/response_handler_nd.py create mode 100644 plugins/module_utils/rest/response_strategies/__init__.py create mode 100644 plugins/module_utils/rest/response_strategies/nd_v1_strategy.py create mode 100644 plugins/module_utils/rest/rest_send.py create mode 100644 plugins/module_utils/rest/results.py create mode 100644 plugins/module_utils/rest/sender_nd.py create mode 100644 
tests/unit/module_utils/fixtures/fixture_data/test_rest_send.json create mode 100644 tests/unit/module_utils/fixtures/load_fixture.py create mode 100644 tests/unit/module_utils/mock_ansible_module.py create mode 100644 tests/unit/module_utils/response_generator.py create mode 100644 tests/unit/module_utils/sender_file.py create mode 100644 tests/unit/module_utils/test_response_handler_nd.py create mode 100644 tests/unit/module_utils/test_rest_send.py create mode 100644 tests/unit/module_utils/test_results.py create mode 100644 tests/unit/module_utils/test_sender_nd.py diff --git a/plugins/module_utils/__init__.py b/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/plugins/module_utils/common/exceptions.py b/plugins/module_utils/common/exceptions.py new file mode 100644 index 00000000..16e31ac6 --- /dev/null +++ b/plugins/module_utils/common/exceptions.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +# exceptions.py + +Exception classes for the cisco.nd Ansible collection. +""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +from typing import Any, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, +) + + +class NDErrorData(BaseModel): + """ + # Summary + + Pydantic model for structured error data from NDModule requests. + + This model provides type-safe error information that can be serialized + to a dict for use with Ansible's fail_json. 
+ + ## Attributes + + - msg: Human-readable error message (required) + - status: HTTP status code as integer (optional) + - request_payload: Request payload that was sent (optional) + - response_payload: Response payload from controller (optional) + - raw: Raw response content for non-JSON responses (optional) + + ## Raises + + - None + """ + + model_config = ConfigDict(extra="forbid") + + msg: str + status: Optional[int] = None + request_payload: Optional[dict[str, Any]] = None + response_payload: Optional[dict[str, Any]] = None + raw: Optional[Any] = None + + +class NDModuleError(Exception): + """ + # Summary + + Exception raised by NDModule when a request fails. + + This exception wraps an NDErrorData Pydantic model, providing structured + error information that can be used by callers to build appropriate error + responses (e.g., Ansible fail_json). + + ## Usage Example + + ```python + try: + data = nd.request("/api/v1/endpoint", HttpVerbEnum.POST, payload) + except NDModuleError as e: + print(f"Error: {e.msg}") + print(f"Status: {e.status}") + if e.response_payload: + print(f"Response: {e.response_payload}") + # Use to_dict() for fail_json + module.fail_json(**e.to_dict()) + ``` + + ## Raises + + - None + """ + + # pylint: disable=too-many-arguments + def __init__( + self, + msg: str, + status: Optional[int] = None, + request_payload: Optional[dict[str, Any]] = None, + response_payload: Optional[dict[str, Any]] = None, + raw: Optional[Any] = None, + ) -> None: + self.error_data = NDErrorData( + msg=msg, + status=status, + request_payload=request_payload, + response_payload=response_payload, + raw=raw, + ) + super().__init__(msg) + + @property + def msg(self) -> str: + """Human-readable error message.""" + return self.error_data.msg + + @property + def status(self) -> Optional[int]: + """HTTP status code.""" + return self.error_data.status + + @property + def request_payload(self) -> Optional[dict[str, Any]]: + """Request payload that was sent.""" + return 
self.error_data.request_payload + + @property + def response_payload(self) -> Optional[dict[str, Any]]: + """Response payload from controller.""" + return self.error_data.response_payload + + @property + def raw(self) -> Optional[Any]: + """Raw response content for non-JSON responses.""" + return self.error_data.raw + + def to_dict(self) -> dict[str, Any]: + """ + # Summary + + Convert exception attributes to a dict for use with fail_json. + + Returns a dict containing only non-None attributes. + + ## Raises + + - None + """ + return self.error_data.model_dump(exclude_none=True) diff --git a/plugins/module_utils/nd_v2.py b/plugins/module_utils/nd_v2.py new file mode 100644 index 00000000..0a3fe61a --- /dev/null +++ b/plugins/module_utils/nd_v2.py @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +# nd_v2.py + +Simplified NDModule using RestSend infrastructure with exception-based error handling. + +This module provides a streamlined interface for interacting with Nexus Dashboard +controllers. 
Unlike the original nd.py which uses Ansible's fail_json/exit_json, +this module raises Python exceptions, making it: + +- Easier to unit test +- Reusable with non-Ansible code (e.g., raw Python Requests) +- More Pythonic in error handling + +## Usage Example + +```python +from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import ( + NDModule, + NDModuleError, + nd_argument_spec, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum + +def main(): + argument_spec = nd_argument_spec() + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + nd = NDModule(module) + + try: + data = nd.request("/api/v1/some/endpoint", HttpVerbEnum.GET) + module.exit_json(changed=False, data=data) + except NDModuleError as e: + module.fail_json(msg=e.msg, status=e.status, response_payload=e.response_payload) +``` +""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import logging +from typing import Any, Optional + +from ansible.module_utils.basic import env_fallback +from ansible_collections.cisco.nd.plugins.module_utils.common.exceptions import NDModuleError +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.rest.protocols.response_handler import ResponseHandlerProtocol +from ansible_collections.cisco.nd.plugins.module_utils.rest.protocols.sender import SenderProtocol +from ansible_collections.cisco.nd.plugins.module_utils.rest.response_handler_nd import ResponseHandler +from ansible_collections.cisco.nd.plugins.module_utils.rest.rest_send import RestSend +from ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd import Sender + + +def nd_argument_spec() -> dict[str, Any]: + """ + Return the common argument spec for ND 
modules. + + This function provides the standard arguments that all ND modules + should accept for connection and authentication. + """ + return dict( + host=dict(type="str", required=False, aliases=["hostname"], fallback=(env_fallback, ["ND_HOST"])), + port=dict(type="int", required=False, fallback=(env_fallback, ["ND_PORT"])), + username=dict(type="str", fallback=(env_fallback, ["ND_USERNAME", "ANSIBLE_NET_USERNAME"])), + password=dict(type="str", required=False, no_log=True, fallback=(env_fallback, ["ND_PASSWORD", "ANSIBLE_NET_PASSWORD"])), + output_level=dict(type="str", default="normal", choices=["debug", "info", "normal"], fallback=(env_fallback, ["ND_OUTPUT_LEVEL"])), + timeout=dict(type="int", default=30, fallback=(env_fallback, ["ND_TIMEOUT"])), + use_proxy=dict(type="bool", fallback=(env_fallback, ["ND_USE_PROXY"])), + use_ssl=dict(type="bool", fallback=(env_fallback, ["ND_USE_SSL"])), + validate_certs=dict(type="bool", fallback=(env_fallback, ["ND_VALIDATE_CERTS"])), + login_domain=dict(type="str", fallback=(env_fallback, ["ND_LOGIN_DOMAIN"])), + ) + + +class NDModule: + """ + # Summary + + Simplified NDModule using RestSend infrastructure with exception-based error handling. + + This class provides a clean interface for making REST API requests to Nexus Dashboard + controllers. It uses the RestSend/Sender/ResponseHandler infrastructure for + separation of concerns and testability. + + ## Key Differences from nd.py NDModule + + 1. Uses exceptions (NDModuleError) instead of fail_json/exit_json + 2. No Connection class dependency - uses Sender for HTTP operations + 3. Minimal state - only tracks request/response metadata + 4. 
request() leverages RestSend -> Sender -> ResponseHandler + + ## Usage Example + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule, NDModuleError + from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum + + nd = NDModule(module) + + try: + # GET request + data = nd.request("/api/v1/endpoint") + + # POST request with payload + result = nd.request("/api/v1/endpoint", HttpVerbEnum.POST, {"key": "value"}) + except NDModuleError as e: + module.fail_json(**e.to_dict()) + ``` + + ## Raises + + - NDModuleError: When a request fails (replaces fail_json) + - ValueError: When RestSend encounters configuration errors + - TypeError: When invalid types are passed to RestSend + """ + + def __init__(self, module) -> None: + """ + Initialize NDModule with an AnsibleModule instance. + + Args: + module: AnsibleModule instance (or compatible mock for testing) + """ + self.class_name = self.__class__.__name__ + self.module = module + self.params: dict[str, Any] = module.params + + self.log = logging.getLogger(f"nd.{self.class_name}") + + # Request/response state (for debugging and error reporting) + self.method: Optional[str] = None + self.path: Optional[str] = None + self.response: Optional[str] = None + self.status: Optional[int] = None + self.url: Optional[str] = None + + # RestSend infrastructure (lazy initialized) + self._rest_send: Optional[RestSend] = None + self._sender: Optional[SenderProtocol] = None + self._response_handler: Optional[ResponseHandlerProtocol] = None + + if self.module._debug: + self.module.warn("Enable debug output because ANSIBLE_DEBUG was set.") + self.params["output_level"] = "debug" + + def _get_rest_send(self) -> RestSend: + """ + # Summary + + Lazy initialization of RestSend and its dependencies. + + ## Returns + + - RestSend: Configured RestSend instance ready for use. 
+ """ + method_name = "_get_rest_send" + params = {} + if self._rest_send is None: + params = { + "check_mode": self.module.check_mode, + "state": self.params.get("state"), + } + self._sender = Sender() + self._sender.ansible_module = self.module + self._response_handler = ResponseHandler() + self._rest_send = RestSend(params) + self._rest_send.sender = self._sender + self._rest_send.response_handler = self._response_handler + + msg = f"{self.class_name}.{method_name}: " + msg += "Initialized RestSend instance with params: " + msg += f"{params}" + self.log.debug(msg) + return self._rest_send + + @property + def rest_send(self) -> RestSend: + """ + # Summary + + Access to the RestSend instance used by this NDModule. + + ## Returns + + - RestSend: The RestSend instance. + + ## Raises + + - `ValueError`: If accessed before `request()` has been called. + + ## Usage + + ```python + nd = NDModule(module) + data = nd.request("/api/v1/endpoint") + + # Access RestSend response/result + response = nd.rest_send.response_current + result = nd.rest_send.result_current + ``` + """ + if self._rest_send is None: + msg = f"{self.class_name}.rest_send: " + msg += "rest_send must be initialized before accessing. " + msg += "Call request() first." + raise ValueError(msg) + return self._rest_send + + def request( + self, + path: str, + verb: HttpVerbEnum = HttpVerbEnum.GET, + data: Optional[dict[str, Any]] = None, + ) -> dict[str, Any]: + """ + # Summary + + Make a REST API request to the Nexus Dashboard controller. + + This method uses the RestSend infrastructure for improved separation + of concerns and testability. + + ## Args + + - path: The fully-formed API endpoint path including query string + (e.g., "/appcenter/cisco/ndfc/api/v1/endpoint?param=value") + - verb: HTTP verb as HttpVerbEnum (default: HttpVerbEnum.GET) + - data: Optional request payload as a dict + + ## Returns + + The response DATA from the controller (parsed JSON body). 
+ + For full response metadata (status, message, etc.), access + `rest_send.response_current` and `rest_send.result_current` + after calling this method. + + ## Raises + + - `NDModuleError`: If the request fails (with status, payload, etc.) + - `ValueError`: If RestSend encounters configuration errors + - `TypeError`: If invalid types are passed + """ + method_name = "request" + # If PATCH with empty data, return early (existing behavior) + if verb == HttpVerbEnum.PATCH and not data: + return {} + + rest_send = self._get_rest_send() + + # Send the request + try: + rest_send.path = path + rest_send.verb = verb # type: ignore[assignment] + msg = f"{self.class_name}.{method_name}: " + msg += "Sending request " + msg += f"verb: {verb}, " + msg += f"path: {path}" + if data: + rest_send.payload = data + msg += f", data: {data}" + self.log.debug(msg) + rest_send.commit() + except (TypeError, ValueError) as error: + raise ValueError(f"Error in request: {error}") from error + + # Get response and result from RestSend + response = rest_send.response_current + result = rest_send.result_current + + # Update state for debugging/error reporting + self.method = verb.value + self.path = path + self.response = response.get("MESSAGE") + self.status = response.get("RETURN_CODE", -1) + self.url = response.get("REQUEST_PATH") + + # Handle errors based on result + if not result.get("success", False): + response_data = response.get("DATA") + + # Get error message from ResponseHandler + error_msg = self._response_handler.error_message if self._response_handler else "Unknown error" + + # Build exception with available context + raw = None + payload = None + + if isinstance(response_data, dict): + if "raw_response" in response_data: + raw = response_data["raw_response"] + else: + payload = response_data + + raise NDModuleError( + msg=error_msg if error_msg else "Unknown error", + status=self.status, + request_payload=data, + response_payload=payload, + raw=raw, + ) + + # Return the response 
data on success + return response.get("DATA", {}) diff --git a/plugins/module_utils/rest/__init__.py b/plugins/module_utils/rest/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/plugins/module_utils/rest/protocols/__init__.py b/plugins/module_utils/rest/protocols/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/plugins/module_utils/rest/protocols/response_handler.py b/plugins/module_utils/rest/protocols/response_handler.py new file mode 100644 index 00000000..487e12cf --- /dev/null +++ b/plugins/module_utils/rest/protocols/response_handler.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# pylint: disable=missing-module-docstring +# pylint: disable=unnecessary-ellipsis +# pylint: disable=wrong-import-position +# Copyright: (c) 2026, Allen Robel (@arobel) +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +""" +Protocol definition for ResponseHandler classes. +""" + +try: + from typing import Protocol, runtime_checkable +except ImportError: + try: + from typing_extensions import Protocol, runtime_checkable # type: ignore[assignment] + except ImportError: + + class Protocol: # type: ignore[no-redef] + """Stub for Python < 3.8 without typing_extensions.""" + + def runtime_checkable(cls): # type: ignore[no-redef] + return cls + + +from typing import Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum + + +@runtime_checkable +class ResponseHandlerProtocol(Protocol): + """ + # Summary + + Protocol defining the interface for response handlers in RestSend. + + Any class implementing this protocol must provide: + + - `response` property (getter/setter): The controller response dict. 
+ - `result` property (getter): The calculated result based on response and verb. + - `verb` property (getter/setter): The HTTP method (GET, POST, PUT, DELETE, etc.). + - `commit()` method: Parses response and sets result. + + ## Notes + + - Getters for `response`, `result`, and `verb` should raise `ValueError` if + accessed before being set. + + ## Example Implementations + + - `ResponseHandler` in `response_handler_nd.py`: Handles Nexus Dashboard responses. + - Future: `ResponseHandlerApic` for APIC controller responses. + """ + + @property + def response(self) -> dict: + """ + # Summary + + The controller response. + + ## Raises + + - ValueError: If accessed before being set. + """ + ... + + @response.setter + def response(self, value: dict) -> None: + pass + + @property + def result(self) -> dict: + """ + # Summary + + The calculated result based on response and verb. + + ## Raises + + - ValueError: If accessed before commit() is called. + """ + ... + + @property + def verb(self) -> HttpVerbEnum: + """ + # Summary + + HTTP method for the request. + + ## Raises + + - ValueError: If accessed before being set. + """ + ... + + @verb.setter + def verb(self, value: HttpVerbEnum) -> None: + pass + + def commit(self) -> None: + """ + # Summary + + Parse the response and set the result. + + ## Raises + + - ValueError: If response or verb is not set. + """ + ... + + @property + def error_message(self) -> Optional[str]: + """ + # Summary + + Human-readable error message extracted from response. + + ## Returns + + - str: Error message if an error occurred. + - None: If the request was successful or commit() not called. + """ + ... 
diff --git a/plugins/module_utils/rest/protocols/response_validation.py b/plugins/module_utils/rest/protocols/response_validation.py new file mode 100644 index 00000000..d1ec5ef0 --- /dev/null +++ b/plugins/module_utils/rest/protocols/response_validation.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +# Summary + +Protocol definition for version-specific response validation strategies. + +## Description + +This module defines the ResponseValidationStrategy protocol which specifies +the interface for handling version-specific differences in ND API responses, +including status code validation and error message extraction. + +When ND API v2 is released with different status codes or response formats, +implementing a new strategy class allows clean separation of v1 and v2 logic. +""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +try: + from typing import Protocol, runtime_checkable +except ImportError: + try: + from typing_extensions import Protocol, runtime_checkable # type: ignore[assignment] + except ImportError: + + class Protocol: # type: ignore[no-redef] + """Stub for Python < 3.8 without typing_extensions.""" + + def runtime_checkable(cls): # type: ignore[no-redef] + return cls + + +from typing import Optional + +# pylint: disable=unnecessary-ellipsis + + +@runtime_checkable +class ResponseValidationStrategy(Protocol): + """ + # Summary + + Protocol for version-specific response validation. + + ## Description + + This protocol defines the interface for handling version-specific + differences in ND API responses, including status code validation + and error message extraction. 
+ + Implementations of this protocol enable injecting version-specific + behavior into ResponseHandler without modifying the handler itself. + + ## Methods + + See property and method definitions below. + + ## Raises + + None - implementations may raise exceptions per their logic + """ + + @property + def success_codes(self) -> set[int]: + """ + # Summary + + Return set of HTTP status codes considered successful. + + ## Returns + + - Set of integers representing success status codes + """ + ... + + @property + def not_found_code(self) -> int: + """ + # Summary + + Return HTTP status code for resource not found. + + ## Returns + + - Integer representing not-found status code (typically 404) + """ + ... + + def is_success(self, response: dict) -> bool: + """ + # Summary + + Check if the full response indicates success. + + ## Description + + Implementations must check both the HTTP status code and any embedded error + indicators in the response body, since some ND API endpoints return a + successful status code (e.g. 200) while embedding an error in the payload. + + ## Parameters + + - response: Response dict with keys RETURN_CODE, MESSAGE, DATA, etc. + + ## Returns + + - True if the response is fully successful (good status code and no embedded error), False otherwise + + ## Raises + + None + """ + ... + + def is_not_found(self, return_code: int) -> bool: + """ + # Summary + + Check if return code indicates not found. + + ## Parameters + + - return_code: HTTP status code to check + + ## Returns + + - True if code matches not_found_code, False otherwise + + ## Raises + + None + """ + ... + + def is_changed(self, response: dict) -> bool: + """ + # Summary + + Check if a successful mutation request actually changed state. + + ## Description + + Some ND API endpoints include a `modified` response header (string `"true"` or + `"false"`) that explicitly signals whether the operation mutated any state. 
+ Implementations should honour this header when present and default to `True` + when it is absent (matching the historical behaviour for PUT/POST/DELETE). + + This method should only be called after `is_success` has returned `True`. + + ## Parameters + + - response: Response dict with keys RETURN_CODE, MESSAGE, DATA, and any HTTP + response headers (lowercased) forwarded by the HttpAPI plugin. + + ## Returns + + - True if the operation changed state (or if the `modified` header is absent) + - False if the `modified` header is explicitly `"false"` + + ## Raises + + None + """ + ... + + def extract_error_message(self, response: dict) -> Optional[str]: + """ + # Summary + + Extract error message from response DATA. + + ## Parameters + + - response: Response dict with keys RETURN_CODE, MESSAGE, DATA, etc. + + ## Returns + + - Error message string if found, None otherwise + + ## Raises + + None - should return None gracefully if error message cannot be extracted + """ + ... diff --git a/plugins/module_utils/rest/protocols/sender.py b/plugins/module_utils/rest/protocols/sender.py new file mode 100644 index 00000000..5e55047c --- /dev/null +++ b/plugins/module_utils/rest/protocols/sender.py @@ -0,0 +1,103 @@ +# pylint: disable=wrong-import-position +# pylint: disable=missing-module-docstring +# pylint: disable=unnecessary-ellipsis +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +try: + from typing import Protocol, runtime_checkable +except ImportError: + try: + from typing_extensions import Protocol, runtime_checkable # type: ignore[assignment] + except ImportError: + + class Protocol: # type: ignore[no-redef] + 
"""Stub for Python < 3.8 without typing_extensions.""" + + def runtime_checkable(cls): # type: ignore[no-redef] + return cls + + +from typing import Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum + + +@runtime_checkable +class SenderProtocol(Protocol): + """ + # Summary + + Protocol defining the sender interface for RestSend. + + Any class implementing this protocol must provide: + + - `path` property (getter/setter): The endpoint path for the REST request. + - `verb` property (getter/setter): The HTTP method (GET, POST, PUT, DELETE, etc.). + - `payload` property (getter/setter): Optional request payload as a dict. + - `response` property (getter): The response from the controller. + - `commit()` method: Sends the request to the controller. + + ## Example Implementations + + - `Sender` in `sender_nd.py`: Uses Ansible HttpApi plugin. + - `Sender` in `sender_file.py`: Reads responses from files (for testing). + """ + + @property + def path(self) -> str: + """Endpoint path for the REST request.""" + ... + + @path.setter + def path(self, value: str) -> None: + """Set the endpoint path for the REST request.""" + ... + + @property + def verb(self) -> HttpVerbEnum: + """HTTP method for the REST request.""" + ... + + @verb.setter + def verb(self, value: HttpVerbEnum) -> None: + """Set the HTTP method for the REST request.""" + ... + + @property + def payload(self) -> Optional[dict]: + """Optional payload to send to the controller.""" + ... + + @payload.setter + def payload(self, value: dict) -> None: + """Set the optional payload for the REST request.""" + ... + + @property + def response(self) -> dict: + """The response from the controller.""" + ... + + def commit(self) -> None: + """ + Send the request to the controller. + + Raises: + ConnectionError: If there is an error with the connection. + """ + ... 
diff --git a/plugins/module_utils/rest/response_handler_nd.py b/plugins/module_utils/rest/response_handler_nd.py new file mode 100644 index 00000000..e7026d30 --- /dev/null +++ b/plugins/module_utils/rest/response_handler_nd.py @@ -0,0 +1,401 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +# response_handler_nd.py + +Implements the ResponseHandler interface for handling Nexus Dashboard controller responses. + +## Version Compatibility + +This handler is designed for ND API v1 responses (ND 4.2+). + +### Status Code Assumptions + +Status codes are defined by the injected `ResponseValidationStrategy`, defaulting +to `NdV1Strategy` (ND 4.2+): + +- Success: 200, 201, 202, 204, 207 +- Not Found: 404 (treated as success for GET) +- Error: 405, 409 + +If ND API v2 uses different codes, inject a new strategy via the +`validation_strategy` property rather than modifying this class. + +### Response Format + +Expects ND HttpApi plugin to provide responses with these keys: + +- RETURN_CODE (int): HTTP status code (e.g., 200, 404, 500) +- MESSAGE (str): HTTP reason phrase (e.g., "OK", "Not Found") +- DATA (dict): Parsed JSON body or dict with raw_response if parsing failed +- REQUEST_PATH (str): The request URL path +- METHOD (str): The HTTP method used (GET, POST, PUT, DELETE, PATCH) + +### Supported Error Formats + +The error_message property handles multiple ND API v1 error response formats: + +1. code/message dict: {"code": , "message": } +2. messages array: {"messages": [{"code": , "severity": , "message": }]} +3. errors array: {"errors": [, ...]} +4. raw_response: {"raw_response": } for non-JSON responses + +If ND API v2 changes error response structures, error extraction logic will need updates. + +## Future v2 Considerations + +If ND API v2 changes response format or status codes, implement a new strategy +class (e.g. 
`NdV2Strategy`) conforming to `ResponseValidationStrategy` and inject +it via `response_handler.validation_strategy = NdV2Strategy()`. + +TODO: Should response be converted to a Pydantic model by this class? +""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import copy +import logging +from typing import Any, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.rest.protocols.response_validation import ResponseValidationStrategy +from ansible_collections.cisco.nd.plugins.module_utils.rest.response_strategies.nd_v1_strategy import NdV1Strategy + + +class ResponseHandler: + """ + # Summary + + Implement the response handler interface for injection into RestSend(). + + ## Raises + + - `TypeError` if: + - `response` is not a dict. + - `ValueError` if: + - `response` is missing any fields required by the handler + to calculate the result. + - Required fields: + - `RETURN_CODE` + - `MESSAGE` + - `response` is not set prior to calling `commit()`. + - `verb` is not set prior to calling `commit()`. + + ## Interface specification + + - `response` setter property + - Accepts a dict containing the controller response. + - Raises `TypeError` if: + - `response` is not a dict. + - Raises `ValueError` if: + - `response` is missing any fields required by the handler + to calculate the result, for example `RETURN_CODE` and + `MESSAGE`. + - `result` getter property + - Returns a dict containing the calculated result based on the + controller response and the request verb. + - Raises `ValueError` if `result` is accessed before calling + `commit()`. + - `result` setter property + - Set internally by the handler based on the response and verb. 
+ - `verb` setter property + - Accepts an HttpVerbEnum enum defining the request verb. + - Valid verb: One of "DELETE", "GET", "POST", "PUT". + - e.g. HttpVerbEnum.GET, HttpVerbEnum.POST, etc. + - Raises `ValueError` if verb is not set prior to calling `commit()`. + - `commit()` method + - Parse `response` and set `result`. + - Raise `ValueError` if: + - `response` is not set. + - `verb` is not set. + + ## Usage example + + ```python + # import and instantiate the class + from ansible_collections.cisco.nd.plugins.module_utils.rest.response_handler_nd import \ + ResponseHandler + response_handler = ResponseHandler() + + try: + # Set the response from the controller + response_handler.response = controller_response + + # Set the request verb + response_handler.verb = HttpVerbEnum.GET + + # Call commit to parse the response + response_handler.commit() + + # Access the result + result = response_handler.result + except (TypeError, ValueError) as error: + handle_error(error) + ``` + + """ + + def __init__(self) -> None: + self.class_name = self.__class__.__name__ + method_name = "__init__" + + self.log = logging.getLogger(f"nd.{self.class_name}") + + self._response: Optional[dict[str, Any]] = None + self._result: Optional[dict[str, Any]] = None + self._strategy: ResponseValidationStrategy = NdV1Strategy() + self._verb: Optional[HttpVerbEnum] = None + + msg = f"ENTERED {self.class_name}.{method_name}" + self.log.debug(msg) + + def _handle_response(self) -> None: + """ + # Summary + + Call the appropriate handler for response based on the value of self.verb + """ + if self.verb == HttpVerbEnum.GET: + self._handle_get_response() + else: + self._handle_post_put_delete_response() + + def _handle_get_response(self) -> None: + """ + # Summary + + Handle GET responses from the controller and set self.result. 
+ + - self.result is a dict containing: + - found: + - False if RETURN_CODE == 404 + - True otherwise (when successful) + - success: + - True if RETURN_CODE in (200, 201, 202, 204, 207, 404) + - False otherwise (error status codes) + """ + result = {} + return_code = self.response.get("RETURN_CODE") + + # 404 Not Found - resource doesn't exist, but request was successful + if self._strategy.is_not_found(return_code): + result["found"] = False + result["success"] = True + # Success codes with no embedded error - resource found + elif self._strategy.is_success(self.response): + result["found"] = True + result["success"] = True + # Error codes - request failed + else: + result["found"] = False + result["success"] = False + + self.result = copy.copy(result) + + def _handle_post_put_delete_response(self) -> None: + """ + # Summary + + Handle POST, PUT, DELETE responses from the controller and set + self.result. + + - self.result is a dict containing: + - changed: + - True if RETURN_CODE in (200, 201, 202, 204, 207) and no ERROR + - False otherwise + - success: + - True if RETURN_CODE in (200, 201, 202, 204, 207) and no ERROR + - False otherwise + """ + result = {} + + # Success codes with no embedded error indicate the operation completed. + # Use the modified header (when present) as the authoritative signal for + # whether state was actually changed, falling back to True when absent. + if self._strategy.is_success(self.response): + result["success"] = True + result["changed"] = self._strategy.is_changed(self.response) + else: + result["success"] = False + result["changed"] = False + + self.result = copy.copy(result) + + def commit(self) -> None: + """ + # Summary + + Parse the response from the controller and set self.result + based on the response. + + ## Raises + + - ``ValueError`` if: + - ``response`` is not set. + - ``verb`` is not set. 
+ """ + method_name = "commit" + msg = f"{self.class_name}.{method_name}: " + msg += f"response {self.response}, verb {self.verb}" + self.log.debug(msg) + self._handle_response() + + @property + def response(self) -> dict[str, Any]: + """ + # Summary + + The controller response. + + ## Raises + + - getter: ``ValueError`` if response is not set. + - setter: ``TypeError`` if ``response`` is not a dict. + - setter: ``ValueError`` if ``response`` is missing required fields + (``RETURN_CODE``, ``MESSAGE``). + """ + if self._response is None: + msg = f"{self.class_name}.response: " + msg += "response must be set before accessing." + raise ValueError(msg) + return self._response + + @response.setter + def response(self, value: dict[str, Any]) -> None: + method_name = "response" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"{self.class_name}.{method_name} must be a dict. " + msg += f"Got {value}." + raise TypeError(msg) + if value.get("MESSAGE", None) is None: + msg = f"{self.class_name}.{method_name}: " + msg += "response must have a MESSAGE key. " + msg += f"Got: {value}." + raise ValueError(msg) + if value.get("RETURN_CODE", None) is None: + msg = f"{self.class_name}.{method_name}: " + msg += "response must have a RETURN_CODE key. " + msg += f"Got: {value}." + raise ValueError(msg) + self._response = value + + @property + def result(self) -> dict[str, Any]: + """ + # Summary + + The result calculated by the handler based on the controller response. + + ## Raises + + - getter: ``ValueError`` if result is not set (commit() not called). + - setter: ``TypeError`` if result is not a dict. + """ + if self._result is None: + msg = f"{self.class_name}.result: " + msg += "result must be set before accessing. Call commit() first." 
+ raise ValueError(msg) + return self._result + + @result.setter + def result(self, value: dict[str, Any]) -> None: + method_name = "result" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"{self.class_name}.{method_name} must be a dict. " + msg += f"Got {value}." + raise TypeError(msg) + self._result = value + + @property + def verb(self) -> HttpVerbEnum: + """ + # Summary + + HTTP method for the REST request e.g. HttpVerbEnum.GET, HttpVerbEnum.POST, etc. + + ## Raises + + - ``ValueError`` if value is not set. + """ + if self._verb is None: + raise ValueError(f"{self.class_name}.verb is not set.") + return self._verb + + @verb.setter + def verb(self, value: HttpVerbEnum) -> None: + self._verb = value + + @property + def error_message(self) -> Optional[str]: + """ + # Summary + + Extract a human-readable error message from the response DATA. + + Delegates to the injected `ResponseValidationStrategy`. Returns None if + result indicates success or if `commit()` has not been called. + + ## Returns + + - str: Human-readable error message if an error occurred. + - None: If the request was successful or `commit()` not called. + + ## Raises + + None + """ + if self._result is not None and not self._result.get("success", True): + return self._strategy.extract_error_message(self._response) + return None + + @property + def validation_strategy(self) -> ResponseValidationStrategy: + """ + # Summary + + The response validation strategy used to check status codes and extract + error messages. + + ## Returns + + - `ResponseValidationStrategy`: The current strategy instance. + + ## Raises + + None + """ + return self._strategy + + @validation_strategy.setter + def validation_strategy(self, value: ResponseValidationStrategy) -> None: + """ + # Summary + + Set the response validation strategy. + + ## Raises + + ### TypeError + + - If `value` does not implement `ResponseValidationStrategy`. 
+ """ + method_name = "validation_strategy" + if not isinstance(value, ResponseValidationStrategy): + msg = f"{self.class_name}.{method_name}: " + msg += f"Expected ResponseValidationStrategy. Got {type(value)}." + raise TypeError(msg) + self._strategy = value diff --git a/plugins/module_utils/rest/response_strategies/__init__.py b/plugins/module_utils/rest/response_strategies/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/plugins/module_utils/rest/response_strategies/nd_v1_strategy.py b/plugins/module_utils/rest/response_strategies/nd_v1_strategy.py new file mode 100644 index 00000000..58c7784f --- /dev/null +++ b/plugins/module_utils/rest/response_strategies/nd_v1_strategy.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +# Summary + +ND API v1 response validation strategy. + +## Description + +Implements status code validation and error message extraction for ND API v1 +responses (ND 4.2). + +This strategy encapsulates the response handling logic previously hardcoded +in ResponseHandler, enabling version-specific behavior to be injected. +""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +from typing import Any, Optional + + +class NdV1Strategy: + """ + # Summary + + Response validation strategy for ND API v1. + + ## Description + + Implements status code validation and error message extraction + for ND API v1 (ND 4.2+). + + ## Status Codes + + - Success: 200, 201, 202, 204, 207 + - Not Found: 404 (treated as success for GET) + - Error: anything not in success codes and not 404 + + ## Error Formats Supported + + 1. raw_response: Non-JSON response stored in DATA.raw_response + 2. 
code/message: DATA.code and DATA.message + 3. messages array: all DATA.messages[].{code, severity, message} joined with "; " + 4. errors array: all DATA.errors[] joined with "; " + 5. Connection failure: No DATA with REQUEST_PATH and MESSAGE + 6. Non-dict DATA: Stringified DATA value + 7. Unknown: Fallback with RETURN_CODE + + ## Raises + + None + """ + + @property + def success_codes(self) -> set[int]: + """ + # Summary + + Return v1 success codes. + + ## Returns + + - Set of integers: {200, 201, 202, 204, 207} + + ## Raises + + None + """ + return {200, 201, 202, 204, 207} + + @property + def not_found_code(self) -> int: + """ + # Summary + + Return v1 not found code. + + ## Returns + + - Integer: 404 + + ## Raises + + None + """ + return 404 + + def is_success(self, response: dict) -> bool: + """ + # Summary + + Check if the full response indicates success (v1). + + ## Description + + Returns True only when both conditions hold: + + 1. `RETURN_CODE` is in `success_codes` + 2. The response body contains no embedded error indicators + + Embedded error indicators checked: + + - Top-level `ERROR` key is present + - `DATA.error` key is present + + ## Parameters + + - response: Response dict with keys RETURN_CODE, MESSAGE, DATA, etc. + + ## Returns + + - True if the response is fully successful, False otherwise + + ## Raises + + None + """ + return_code = response.get("RETURN_CODE", -1) + if return_code not in self.success_codes: + return False + if response.get("ERROR") is not None: + return False + data = response.get("DATA") + if isinstance(data, dict) and data.get("error") is not None: + return False + return True + + def is_not_found(self, return_code: int) -> bool: + """ + # Summary + + Check if return code indicates not found (v1). 
+ + ## Parameters + + - return_code: HTTP status code to check + + ## Returns + + - True if code matches not_found_code, False otherwise + + ## Raises + + None + """ + return return_code == self.not_found_code + + def is_changed(self, response: dict) -> bool: + """ + # Summary + + Check if a successful mutation request actually changed state (v1). + + ## Description + + ND API v1 may include a `modified` response header (forwarded by the HttpAPI + plugin as a lowercase key in the response dict) with string values `"true"` or + `"false"`. When present, this header is the authoritative signal for whether + the operation mutated any state on the controller. + + When the header is absent the method defaults to `True`, preserving the + historical behaviour for verbs (DELETE, POST, PUT) where ND does not send it. + + ## Parameters + + - response: Response dict with keys RETURN_CODE, MESSAGE, DATA, and any HTTP + response headers (lowercased) forwarded by the HttpAPI plugin. + + ## Returns + + - False if the `modified` header is present and equals `"false"` (case-insensitive) + - True otherwise + + ## Raises + + None + """ + modified = response.get("modified") + if modified is None: + return True + return str(modified).lower() != "false" + + def extract_error_message(self, response: dict) -> Optional[str]: + """ + # Summary + + Extract error message from v1 response DATA. + + ## Description + + Handles multiple ND API v1 error formats in priority order: + + 1. Connection failure (no DATA) + 2. Non-JSON response (raw_response in DATA) + 3. code/message dict + 4. messages array with code/severity/message (all items joined) + 5. errors array (all items joined) + 6. Unknown dict format + 7. 
Non-dict DATA + + ## Parameters + + - response: Response dict with keys RETURN_CODE, MESSAGE, DATA, REQUEST_PATH + + ## Returns + + - Error message string if found, None otherwise + + ## Raises + + None - returns None gracefully if error message cannot be extracted + """ + msg: Optional[str] = None + + response_data = response.get("DATA") if response else None + return_code = response.get("RETURN_CODE", -1) if response else -1 + + # No response data - connection failure + if response_data is None: + request_path = response.get("REQUEST_PATH", "unknown") if response else "unknown" + message = response.get("MESSAGE", "Unknown error") if response else "Unknown error" + msg = f"Connection failed for {request_path}. {message}" + # Dict response data - check various ND error formats + elif isinstance(response_data, dict): + # Type-narrow response_data to dict[str, Any] for pylint + # pylint: disable=unsupported-membership-test,unsubscriptable-object + data_dict: dict[str, Any] = response_data + # Raw response (non-JSON) + if "raw_response" in data_dict: + msg = "ND Error: Response could not be parsed as JSON" + # code/message format + elif "code" in data_dict and "message" in data_dict: + msg = f"ND Error {data_dict['code']}: {data_dict['message']}" + + # messages array format + if msg is None and "messages" in data_dict and len(data_dict.get("messages", [])) > 0: + parts = [] + for m in data_dict["messages"]: + if all(k in m for k in ("code", "severity", "message")): + parts.append(f"ND Error {m['code']} ({m['severity']}): {m['message']}") + if parts: + msg = "; ".join(parts) + + # errors array format + if msg is None and "errors" in data_dict and len(data_dict.get("errors", [])) > 0: + msg = f"ND Error: {'; '.join(str(e) for e in data_dict['errors'])}" + + # Unknown dict format - fallback + if msg is None: + msg = f"ND Error: Request failed with status {return_code}" + # Non-dict response data + else: + msg = f"ND Error: {response_data}" + + return msg diff --git 
a/plugins/module_utils/rest/rest_send.py b/plugins/module_utils/rest/rest_send.py new file mode 100644 index 00000000..c87009a5 --- /dev/null +++ b/plugins/module_utils/rest/rest_send.py @@ -0,0 +1,819 @@ +# -*- coding: utf-8 -*- +# pylint: disable=wrong-import-position +# pylint: disable=missing-module-docstring +# Copyright: (c) 2026, Allen Robel (@arobel) +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + + +import copy +import inspect +import json +import logging +from time import sleep +from typing import Any, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.rest.protocols.response_handler import ResponseHandlerProtocol +from ansible_collections.cisco.nd.plugins.module_utils.rest.protocols.sender import SenderProtocol +from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results + + +class RestSend: + """ + # Summary + + - Send REST requests to the controller with retries. + - Accepts a `Sender()` class that implements SenderProtocol. + - The sender interface is defined in + `module_utils/rest/protocols/sender.py` + - Accepts a `ResponseHandler()` class that implements the response + handler interface. 
+ - The response handler interface is defined in + `module_utils/rest/protocols/response_handler.py` + + ## Raises + + - `ValueError` if: + - ResponseHandler() raises `TypeError` or `ValueError` + - Sender().commit() raises `ValueError` + - `verb` is not a valid verb (GET, POST, PUT, DELETE) + - `TypeError` if: + - `check_mode` is not a `bool` + - `path` is not a `str` + - `payload` is not a `dict` + - `add_response()` value is not a `dict` + - `response_current` is not a `dict` + - `response_handler` is not an instance of + `ResponseHandler()` + - `add_result()` value is not a `dict` + - `result_current` is not a `dict` + - `send_interval` is not an `int` + - `sender` is not an instance of `SenderProtocol` + - `timeout` is not an `int` + - `unit_test` is not a `bool` + + ## Usage discussion + + - A Sender() class is used in the usage example below that requires an + instance of `AnsibleModule`, and uses the connection plugin (plugins/httpapi.nd.py) + to send requests to the controller. + - See ``module_utils/rest/protocols/sender.py`` for details about + implementing `Sender()` classes. + - A `ResponseHandler()` class is used in the usage example below that + abstracts controller response handling. It accepts a controller + response dict and returns a result dict. + - See `module_utils/rest/protocols/response_handler.py` for details + about implementing `ResponseHandler()` classes. 
+ + ## Usage example + + ```python + params = {"check_mode": False, "state": "merged"} + sender = Sender() # class that implements SenderProtocol + sender.ansible_module = ansible_module + + try: + rest_send = RestSend(params) + rest_send.sender = sender + rest_send.response_handler = ResponseHandler() + rest_send.unit_test = True # optional, use in unit tests for speed + rest_send.path = "/rest/top-down/fabrics" + rest_send.verb = HttpVerbEnum.GET + rest_send.payload = my_payload # optional + rest_send.save_settings() # save current check_mode and timeout + rest_send.timeout = 300 # optional + rest_send.check_mode = True + # Do things with rest_send... + rest_send.commit() + rest_send.restore_settings() # restore check_mode and timeout + except (TypeError, ValueError) as error: + # Handle error + + # list of responses from the controller for this session + responses = rest_send.responses + # dict containing the current controller response + response_current = rest_send.response_current + # list of results from the controller for this session + results = rest_send.results + # dict containing the current controller result + result_current = rest_send.result_current + ``` + """ + + def __init__(self, params) -> None: + self.class_name = self.__class__.__name__ + + self.log = logging.getLogger(f"nd.{self.class_name}") + + self.params = params + msg = "ENTERED RestSend(): " + msg += f"params: {self.params}" + self.log.debug(msg) + + self._check_mode: bool = False + self._committed_payload: Optional[dict] = None + self._path: Optional[str] = None + self._payload: Optional[dict] = None + self._response: list[dict[str, Any]] = [] + self._response_current: dict[str, Any] = {} + self._response_handler: Optional[ResponseHandlerProtocol] = None + self._result: list[dict] = [] + self._result_current: dict = {} + self._send_interval: int = 5 + self._sender: Optional[SenderProtocol] = None + self._timeout: int = 300 + self._unit_test: bool = False + self._verb: HttpVerbEnum = 
HttpVerbEnum.GET + + # See save_settings() and restore_settings() + self.saved_timeout: Optional[int] = None + self.saved_check_mode: Optional[bool] = None + + self.check_mode = self.params.get("check_mode", False) + + msg = "ENTERED RestSend(): " + msg += f"check_mode: {self.check_mode}" + self.log.debug(msg) + + def restore_settings(self) -> None: + """ + # Summary + + Restore `check_mode` and `timeout` to their saved values. + + ## Raises + + None + + ## See also + + - `save_settings()` + + ## Discussion + + This is useful when a task needs to temporarily set `check_mode` + to False, (or change the timeout value) and then restore them to + their original values. + + - `check_mode` is not restored if `save_settings()` has not + previously been called. + - `timeout` is not restored if `save_settings()` has not + previously been called. + """ + if self.saved_check_mode is not None: + self.check_mode = self.saved_check_mode + if self.saved_timeout is not None: + self.timeout = self.saved_timeout + + def save_settings(self) -> None: + """ + # Summary + + Save the current values of `check_mode` and `timeout` for later + restoration. + + ## Raises + + None + + ## See also + + - `restore_settings()` + + ## NOTES + + - `check_mode` is not saved if it has not yet been initialized. + - `timeout` is not saved if it has not yet been initialized. 
+ """ + if self.check_mode is not None: + self.saved_check_mode = self.check_mode + if self.timeout is not None: + self.saved_timeout = self.timeout + + def commit(self) -> None: + """ + # Summary + + Send the REST request to the controller + + ## Raises + + - `ValueError` if: + - RestSend()._commit_normal_mode() raises + `ValueError` + - ResponseHandler() raises `TypeError` or `ValueError` + - Sender().commit() raises `ValueError` + - `verb` is not a valid verb (GET, POST, PUT, DELETE) + - `TypeError` if: + - `check_mode` is not a `bool` + - `path` is not a `str` + - `payload` is not a `dict` + - `response` is not a `dict` + - `response_current` is not a `dict` + - `response_handler` is not an instance of + `ResponseHandler()` + - `result` is not a `dict` + - `result_current` is not a `dict` + - `send_interval` is not an `int` + - `sender` is not an instance of `Sender()` + - `timeout` is not an `int` + - `unit_test` is not a `bool` + + """ + method_name = "commit" + msg = f"{self.class_name}.{method_name}: " + msg += f"check_mode: {self.check_mode}, " + msg += f"verb: {self.verb}, " + msg += f"path: {self.path}." + self.log.debug(msg) + + try: + if self.check_mode is True: + self._commit_check_mode() + else: + self._commit_normal_mode() + except (TypeError, ValueError) as error: + msg = f"{self.class_name}.{method_name}: " + msg += "Error during commit. " + msg += f"Error details: {error}" + raise ValueError(msg) from error + + def _commit_check_mode(self) -> None: + """ + # Summary + + Simulate a controller request for check_mode. + + ## Raises + + - `ValueError` if: + - ResponseHandler() raises `TypeError` or `ValueError` + - self.response_current raises `TypeError` + - self.result_current raises `TypeError` + + + ## Properties read: + + - `verb`: HttpVerbEnum e.g. HttpVerb.DELETE, HttpVerb.GET, etc. + - `path`: HTTP path e.g. 
http://controller_ip/path/to/endpoint + - `payload`: Optional HTTP payload + + ## Properties written: + + - `response_current`: raw simulated response + - `result_current`: result from self._handle_response() method + """ + method_name = "_commit_check_mode" + + msg = f"{self.class_name}.{method_name}: " + msg += f"verb {self.verb}, path {self.path}." + self.log.debug(msg) + + response_current: dict = {} + response_current["RETURN_CODE"] = 200 + response_current["METHOD"] = self.verb + response_current["REQUEST_PATH"] = self.path + response_current["MESSAGE"] = "OK" + response_current["CHECK_MODE"] = True + response_current["DATA"] = {"simulated": "check-mode-response", "status": "Success"} + + try: + self.response_current = response_current + self.response_handler.response = self.response_current + self.response_handler.verb = self.verb + self.response_handler.commit() + self.result_current = self.response_handler.result + self._response.append(self.response_current) + self._result.append(self.result_current) + self._committed_payload = copy.deepcopy(self._payload) + except (TypeError, ValueError) as error: + msg = f"{self.class_name}.{method_name}: " + msg += "Error building response/result. " + msg += f"Error detail: {error}" + raise ValueError(msg) from error + + def _commit_normal_mode(self) -> None: + """ + # Summary + + Call sender.commit() with retries until successful response or timeout is exceeded. + + ## Raises + + - `ValueError` if: + - HandleResponse() raises `ValueError` + - Sender().commit() raises `ValueError` + - `verb` is not a valid verb (GET, POST, PUT, DELETE)""" + method_name = "_commit_normal_mode" + timeout = copy.copy(self.timeout) + + msg = "Entering commit loop. " + msg += f"timeout: {timeout}, unit_test: {self.unit_test}." 
+ self.log.debug(msg) + + self.sender.path = self.path + self.sender.verb = self.verb + if self.payload is not None: + self.sender.payload = self.payload + success = False + while timeout > 0 and success is False: + msg = f"{self.class_name}.{method_name}: " + msg += "Calling sender.commit(): " + msg += f"timeout {timeout}, success {success}, verb {self.verb}, path {self.path}." + self.log.debug(msg) + + try: + self.sender.commit() + except ValueError as error: + raise ValueError(error) from error + + self.response_current = self.sender.response + # Handle controller response and derive result + try: + self.response_handler.response = self.response_current + self.response_handler.verb = self.verb + self.response_handler.commit() + self.result_current = self.response_handler.result + except (TypeError, ValueError) as error: + msg = f"{self.class_name}.{method_name}: " + msg += "Error building response/result. " + msg += f"Error detail: {error}" + self.log.debug(msg) + raise ValueError(msg) from error + + msg = f"{self.class_name}.{method_name}: " + msg += f"timeout: {timeout}. " + msg += f"result_current: {json.dumps(self.result_current, indent=4, sort_keys=True)}." + self.log.debug(msg) + + msg = f"{self.class_name}.{method_name}: " + msg += f"timeout: {timeout}. " + msg += "response_current: " + msg += f"{json.dumps(self.response_current, indent=4, sort_keys=True)}." + self.log.debug(msg) + + success = self.result_current["success"] + if success is False: + if self.unit_test is False: + sleep(self.send_interval) + timeout -= self.send_interval + msg = f"{self.class_name}.{method_name}: " + msg += f"Subtracted {self.send_interval} from timeout. " + msg += f"timeout: {timeout}." 
+ self.log.debug(msg) + + self._response.append(self.response_current) + self._result.append(self.result_current) + self._committed_payload = copy.deepcopy(self._payload) + self._payload = None + + @property + def check_mode(self) -> bool: + """ + # Summary + + Determines if changes should be made on the controller. + + ## Raises + + - `TypeError` if value is not a `bool` + + ## Default + + `False` + + - If `False`, write operations, if any, are made on the controller. + - If `True`, write operations are not made on the controller. + Instead, controller responses for write operations are simulated + to be successful (200 response code) and these simulated responses + are returned by RestSend(). Read operations are not affected + and are sent to the controller and real responses are returned. + + ## Discussion + + We want to be able to read data from the controller for read-only + operations (i.e. to set check_mode to False temporarily, even when + the user has set check_mode to True). For example, SwitchDetails + is a read-only operation, and we want to be able to read this data to + provide a real controller response to the user. + """ + return self._check_mode + + @check_mode.setter + def check_mode(self, value: bool) -> None: + method_name = "check_mode" + if not isinstance(value, bool): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be a boolean. Got {value}." + raise TypeError(msg) + self._check_mode = value + + @property + def committed_payload(self) -> Optional[dict]: + """ + # Summary + + Return the payload that was sent in the most recent commit, or None. + + ## Raises + + None + + ## Description + + After `commit()`, `self.payload` is reset to None. This property + preserves the payload that was actually sent, so consumers can + read it for registration in Results. 
+ """ + return self._committed_payload + + @property + def failed_result(self) -> dict: + """ + Return a result for a failed task with no changes + """ + return Results().failed_result + + @property + def path(self) -> str: + """ + # Summary + + Endpoint path for the REST request. + + ## Raises + + - getter: `ValueError` if `path` is not set before accessing. + + ## Example + + `/appcenter/cisco/ndfc/api/v1/...etc...` + """ + if self._path is None: + msg = f"{self.class_name}.path: path must be set before accessing." + raise ValueError(msg) + return self._path + + @path.setter + def path(self, value: str) -> None: + self._path = value + + @property + def payload(self) -> Optional[dict]: + """ + # Summary + + Return the payload to send to the controller, or None. + + ## Raises + + - setter: `TypeError` if value is not a `dict` + """ + return self._payload + + @payload.setter + def payload(self, value: dict): + method_name = "payload" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be a dict. Got {value}." + raise TypeError(msg) + self._payload = value + + @property + def response_current(self) -> dict: + """ + # Summary + + Return the current response from the controller as a `dict`. + `commit()` must be called first. + + ## Raises + + - setter: `TypeError` if value is not a `dict` + """ + return copy.deepcopy(self._response_current) + + @response_current.setter + def response_current(self, value): + method_name = "response_current" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be a dict. " + msg += f"Got type {type(value).__name__}, " + msg += f"Value: {value}." + raise TypeError(msg) + self._response_current = value + + @property + def responses(self) -> list[dict]: + """ + # Summary + + The aggregated list of responses from the controller. + + `commit()` must be called first. 
+ """ + return copy.deepcopy(self._response) + + def add_response(self, value: dict) -> None: + """ + # Summary + + Append a response dict to the response list. + + ## Raises + + - `TypeError` if value is not a `dict` + """ + method_name = "add_response" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += "value must be a dict. " + msg += f"Got type {type(value).__name__}, " + msg += f"Value: {value}." + raise TypeError(msg) + self._response.append(value) + + @property + def response_handler(self) -> ResponseHandlerProtocol: + """ + # Summary + + A class that implements ResponseHandlerProtocol. + + ## Raises + + - getter: `ValueError` if `response_handler` is not set before accessing. + - setter: `TypeError` if `value` does not implement `ResponseHandlerProtocol`. + + ## NOTES + + - See module_utils/rest/protocols/response_handler.py for the protocol definition. + """ + if self._response_handler is None: + msg = f"{self.class_name}.response_handler: " + msg += "response_handler must be set before accessing." + raise ValueError(msg) + return self._response_handler + + @staticmethod + def _has_member_static(obj: Any, member: str) -> bool: + """ + Check whether an object has a member without triggering descriptors. + + This avoids invoking property getters during dependency validation. + """ + try: + inspect.getattr_static(obj, member) + return True + except AttributeError: + return False + + @response_handler.setter + def response_handler(self, value: ResponseHandlerProtocol): + required_members = ( + "response", + "result", + "verb", + "commit", + "error_message", + ) + missing_members = [member for member in required_members if not self._has_member_static(value, member)] + if missing_members: + msg = f"{self.class_name}.response_handler: " + msg += "value must implement ResponseHandlerProtocol. " + msg += f"Missing members: {missing_members}. " + msg += f"Got type {type(value).__name__}." 
+ raise TypeError(msg) + if not callable(getattr(value, "commit", None)): + msg = f"{self.class_name}.response_handler: " + msg += "value.commit must be callable. " + msg += f"Got type {type(value).__name__}." + raise TypeError(msg) + self._response_handler = value + + @property + def results(self) -> list[dict]: + """ + # Summary + + The aggregated list of results from the controller. + + `commit()` must be called first. + """ + return copy.deepcopy(self._result) + + def add_result(self, value: dict) -> None: + """ + # Summary + + Append a result dict to the result list. + + ## Raises + + - `TypeError` if value is not a `dict` + """ + method_name = "add_result" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += "value must be a dict. " + msg += f"Got type {type(value).__name__}, " + msg += f"Value: {value}." + raise TypeError(msg) + self._result.append(value) + + @property + def result_current(self) -> dict: + """ + # Summary + + The current result from the controller + + `commit()` must be called first. + + This is a dict containing the current result. + + ## Raises + + - setter: `TypeError` if value is not a `dict` + + """ + return copy.deepcopy(self._result_current) + + @result_current.setter + def result_current(self, value: dict): + method_name = "result_current" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be a dict. " + msg += f"Got {value}." + raise TypeError(msg) + self._result_current = value + + @property + def send_interval(self) -> int: + """ + # Summary + + Send interval, in seconds, for retrying responses from the controller. + + ## Raises + + - setter: ``TypeError`` if value is not an `int` + + ## Default + + `5` + """ + return self._send_interval + + @send_interval.setter + def send_interval(self, value: int) -> None: + method_name = "send_interval" + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be an integer. 
" + msg += f"Got type {type(value).__name__}, " + msg += f"value {value}." + # Check explicit boolean first since isinstance(True, int) is True + if isinstance(value, bool): + raise TypeError(msg) + if not isinstance(value, int): + raise TypeError(msg) + self._send_interval = value + + @property + def sender(self) -> SenderProtocol: + """ + # Summary + + A class implementing the SenderProtocol. + + See module_utils/rest/protocols/sender.py for SenderProtocol definition. + + ## Raises + + - getter: ``ValueError`` if sender is not set before accessing. + - setter: ``TypeError`` if value does not implement SenderProtocol. + """ + if self._sender is None: + msg = f"{self.class_name}.sender: " + msg += "sender must be set before accessing." + raise ValueError(msg) + return self._sender + + @sender.setter + def sender(self, value: SenderProtocol): + required_members = ( + "path", + "verb", + "payload", + "response", + "commit", + ) + missing_members = [member for member in required_members if not self._has_member_static(value, member)] + if missing_members: + msg = f"{self.class_name}.sender: " + msg += "value must implement SenderProtocol. " + msg += f"Missing members: {missing_members}. " + msg += f"Got type {type(value).__name__}." + raise TypeError(msg) + if not callable(getattr(value, "commit", None)): + msg = f"{self.class_name}.sender: " + msg += "value.commit must be callable. " + msg += f"Got type {type(value).__name__}." + raise TypeError(msg) + self._sender = value + + @property + def timeout(self) -> int: + """ + # Summary + + Timeout, in seconds, for retrieving responses from the controller. + + ## Raises + + - setter: ``TypeError`` if value is not an ``int`` + + ## Default + + `300` + """ + return self._timeout + + @timeout.setter + def timeout(self, value: int) -> None: + method_name = "timeout" + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be an integer. 
" + msg += f"Got type {type(value).__name__}, " + msg += f"value {value}." + if isinstance(value, bool): + raise TypeError(msg) + if not isinstance(value, int): + raise TypeError(msg) + self._timeout = value + + @property + def unit_test(self) -> bool: + """ + # Summary + + Is RestSend being called from a unit test. + Set this to True in unit tests to speed the test up. + + ## Raises + + - setter: `TypeError` if value is not a `bool` + + ## Default + + `False` + """ + return self._unit_test + + @unit_test.setter + def unit_test(self, value: bool) -> None: + method_name = "unit_test" + if not isinstance(value, bool): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be a boolean. " + msg += f"Got type {type(value).__name__}, " + msg += f"value {value}." + raise TypeError(msg) + self._unit_test = value + + @property + def verb(self) -> HttpVerbEnum: + """ + # Summary + + HTTP method for the REST request e.g. HttpVerbEnum.GET, HttpVerbEnum.POST, etc. + + ## Raises + + - setter: `TypeError` if value is not an instance of HttpVerbEnum + - getter: `ValueError` if verb is not set before accessing. + """ + if self._verb is None: + msg = f"{self.class_name}.verb: " + msg += "verb must be set before accessing." + raise ValueError(msg) + return self._verb + + @verb.setter + def verb(self, value: HttpVerbEnum): + if not isinstance(value, HttpVerbEnum): + msg = f"{self.class_name}.verb: " + msg += "verb must be an instance of HttpVerbEnum. " + msg += f"Got type {type(value).__name__}." 
+ raise TypeError(msg) + self._verb = value diff --git a/plugins/module_utils/rest/results.py b/plugins/module_utils/rest/results.py new file mode 100644 index 00000000..59281683 --- /dev/null +++ b/plugins/module_utils/rest/results.py @@ -0,0 +1,1178 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +# pylint: disable=too-many-instance-attributes,too-many-public-methods,line-too-long,too-many-lines +""" +Exposes public class Results to collect results across Ansible tasks. +""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + + +import copy +import logging +from typing import Any, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, + ValidationError, + field_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum, OperationType + + +class ApiCallResult(BaseModel): + """ + # Summary + + Pydantic model for a single task result. + + Represents all data for one API call including its response, result, diff, + and metadata. Immutable after creation to prevent accidental modification + of registered results. 
+ + ## Raises + + - `ValidationError`: if field validation fails during instantiation + + ## Attributes + + - `sequence_number`: Unique sequence number for this task (required, >= 1) + - `path`: API endpoint path (required) + - `verb`: HTTP verb as string (required) + - `payload`: Request payload dict, or None for GET requests + - `verbosity_level`: Verbosity level for output filtering (required, 1-6) + - `response`: Controller response dict (required) + - `result`: Handler result dict (required) + - `diff`: Changes dict (required, can be empty) + - `metadata`: Task metadata dict (required) + - `changed`: Whether this task resulted in changes (required) + - `failed`: Whether this task failed (required) + """ + + model_config = ConfigDict(extra="forbid", frozen=True) + + sequence_number: int = Field(ge=1) + path: str + verb: str + payload: Optional[dict[str, Any]] = None + verbosity_level: int = Field(ge=1, le=6) + response: dict[str, Any] + result: dict[str, Any] + diff: dict[str, Any] + metadata: dict[str, Any] + changed: bool + failed: bool + + @field_validator("verb", mode="before") + @classmethod + def _coerce_verb_to_str(cls, value: Any) -> str: + """Coerce HttpVerbEnum to string.""" + if isinstance(value, HttpVerbEnum): + return value.value + return value + + +class FinalResultData(BaseModel): + """ + # Summary + + Pydantic model for the final aggregated result. + + This is the structure returned to Ansible's `exit_json`/`fail_json`. + Contains aggregated data from all registered tasks. 
+ + ## Raises + + - `ValidationError`: if field validation fails during instantiation + + ## Attributes + + - `changed`: Overall changed status across all tasks (required) + - `failed`: Overall failed status across all tasks (required) + - `diff`: List of all diff dicts (default empty list) + - `metadata`: List of all metadata dicts (default empty list) + - `path`: List of API endpoint paths per API call (default empty list) + - `payload`: List of request payloads per API call (default empty list) + - `response`: List of all response dicts (default empty list) + - `result`: List of all result dicts (default empty list) + - `verb`: List of HTTP verbs per API call (default empty list) + - `verbosity_level`: List of verbosity levels per API call (default empty list) + """ + + model_config = ConfigDict(extra="forbid") + + changed: bool + failed: bool + diff: list[dict[str, Any]] = Field(default_factory=list) + metadata: list[dict[str, Any]] = Field(default_factory=list) + path: list[str] = Field(default_factory=list) + payload: list[Optional[dict[str, Any]]] = Field(default_factory=list) + response: list[dict[str, Any]] = Field(default_factory=list) + result: list[dict[str, Any]] = Field(default_factory=list) + verb: list[str] = Field(default_factory=list) + verbosity_level: list[int] = Field(default_factory=list) + + +class PendingApiCall(BaseModel): + """ + # Summary + + Pydantic model for the current task data being built. + + Mutable model used to stage data for the current task before + it's registered and converted to an immutable `ApiCallResult`. + Provides validation while allowing flexibility during the build phase. 
+ + ## Raises + + - `ValidationError`: if field validation fails during instantiation or assignment + + ## Attributes + + - `response`: Controller response dict (default empty dict) + - `result`: Handler result dict (default empty dict) + - `diff`: Changes dict (default empty dict) + - `action`: Action name for metadata (default empty string) + - `state`: Ansible state for metadata (default empty string) + - `check_mode`: Check mode flag for metadata (default False) + - `operation_type`: Operation type determining if changes might occur (default QUERY) + - `path`: API endpoint path (default empty string) + - `verb`: HTTP verb (default GET) + - `payload`: Request payload dict, or None for GET requests + - `verbosity_level`: Verbosity level for output filtering (default 3, range 1-6) + """ + + model_config = ConfigDict(extra="allow", validate_assignment=True) + + response: dict[str, Any] = Field(default_factory=dict) + result: dict[str, Any] = Field(default_factory=dict) + diff: dict[str, Any] = Field(default_factory=dict) + action: str = "" + state: str = "" + check_mode: bool = False + operation_type: OperationType = OperationType.QUERY + path: str = "" + verb: HttpVerbEnum = HttpVerbEnum.GET + payload: Optional[dict[str, Any]] = None + verbosity_level: int = Field(default=3, ge=1, le=6) + + +class Results: + """ + # Summary + + Collect and aggregate results across tasks using Pydantic data models. + + ## Raises + + - `TypeError`: if properties are not of the correct type + - `ValueError`: if Pydantic validation fails or required data is missing + + ## Architecture + + This class uses a three-model Pydantic architecture for data validation: + + 1. `PendingApiCall` - Mutable staging area for building the current task + 2. `ApiCallResult` - Immutable registered API call result with validation (frozen=True) + 3. 
`FinalResultData` - Aggregated result for Ansible output + + The lifecycle is: **Build (Current) → Register (Task) → Aggregate (Final)** + + ## Description + + Provides a mechanism to collect results across tasks. The task classes + must support this Results class. Specifically, they must implement the + following: + + 1. Accept an instantiation of `Results()` + - Typically a class property is used for this + 2. Populate the `Results` instance with the current task data + - Set properties: `response_current`, `result_current`, `diff_current` + - Set metadata properties: `action`, `state`, `check_mode`, `operation_type` + 3. Optional. Register the task result with `Results.register_api_call()` + - Converts current task to immutable `ApiCallResult` + - Validates data with Pydantic + - Resets current task for next registration + - Tasks are NOT required to be registered. There are cases where + a task's information would not be useful to an end-user. If this + is the case, the task can simply not be registered. + + `Results` should be instantiated in the main Ansible Task class and + passed to all other task classes for which results are to be collected. + The task classes should populate the `Results` instance with the results + of the task and then register the results with `Results.register_api_call()`. + + This may be done within a separate class (as in the example below, where + the `FabricDelete()` class is called from the `TaskDelete()` class. + The `Results` instance can then be used to build the final result, by + calling `Results.build_final_result()`. + + ## Example Usage + + We assume an Ansible module structure as follows: + + - `TaskCommon()`: Common methods used by the various ansible + state classes. + - `TaskDelete(TaskCommon)`: Implements the delete state + - `TaskMerge(TaskCommon)`: Implements the merge state + - `TaskQuery(TaskCommon)`: Implements the query state + - etc... 
+ + In TaskCommon, `Results` is instantiated and, hence, is inherited by all + state classes.: + + ```python + class TaskCommon: + def __init__(self): + self._results = Results() + + @property + def results(self) -> Results: + ''' + An instance of the Results class. + ''' + return self._results + + @results.setter + def results(self, value: Results) -> None: + self._results = value + ``` + + In each of the state classes (TaskDelete, TaskMerge, TaskQuery, etc...) + a class is instantiated (in the example below, FabricDelete) that + supports collecting results for the Results instance: + + ```python + class TaskDelete(TaskCommon): + def __init__(self, ansible_module): + super().__init__(ansible_module) + self.fabric_delete = FabricDelete(self.ansible_module) + + def commit(self): + ''' + delete the fabric + ''' + ... + self.fabric_delete.fabric_names = ["FABRIC_1", "FABRIC_2"] + self.fabric_delete.results = self.results + # results.register_api_call() is optionally called within the + # commit() method of the FabricDelete class. + self.fabric_delete.commit() + ``` + + Finally, within the main() method of the Ansible module, the final result + is built by calling Results.build_final_result(): + + ```python + if ansible_module.params["state"] == "deleted": + task = TaskDelete(ansible_module) + task.commit() + elif ansible_module.params["state"] == "merged": + task = TaskDelete(ansible_module) + task.commit() + # etc, for other states... 
+ + # Build the final result + task.results.build_final_result() + + # Call fail_json() or exit_json() based on the final result + if True in task.results.failed: + ansible_module.fail_json(**task.results.final_result) + ansible_module.exit_json(**task.results.final_result) + ``` + + results.final_result will be a dict with the following structure + + ```json + { + "changed": True, # or False + "failed": True, # or False + "diff": { + [{"diff1": "diff"}, {"diff2": "diff"}, {"etc...": "diff"}], + } + "response": { + [{"response1": "response"}, {"response2": "response"}, {"etc...": "response"}], + } + "result": { + [{"result1": "result"}, {"result2": "result"}, {"etc...": "result"}], + } + "metadata": { + [{"metadata1": "metadata"}, {"metadata2": "metadata"}, {"etc...": "metadata"}], + } + } + ``` + + diff, response, and result dicts are per the Ansible ND Collection standard output. + + An example of a result dict would be (sequence_number is added by Results): + + ```json + { + "found": true, + "sequence_number": 1, + "success": true + } + ``` + + An example of a metadata dict would be (sequence_number is added by Results): + + + ```json + { + "action": "merge", + "check_mode": false, + "state": "merged", + "sequence_number": 1 + } + ``` + + `sequence_number` indicates the order in which the task was registered + with `Results`. It provides a way to correlate the diff, response, + result, and metadata across all tasks. + + ## Typical usage within a task class such as FabricDelete + + ```python + from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType + from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results + from ansible_collections.cisco.nd.plugins.module_utils.rest.rest_send import RestSend + ... + class FabricDelete: + def __init__(self, ansible_module): + ... 
+ self.action: str = "fabric_delete" + self.operation_type: OperationType = OperationType.DELETE # Determines if changes might occur + self._rest_send: RestSend = RestSend(params) + self._results: Results = Results() + ... + + def commit(self): + ... + # Set current task data (no need to manually track changed/failed) + self._results.response_current = self._rest_send.response_current + self._results.result_current = self._rest_send.result_current + self._results.diff_current = {} # or actual diff if available + # register_api_call() determines changed/failed automatically + self._results.register_api_call() + ... + + @property + def results(self) -> Results: + ''' + An instance of the Results class. + ''' + return self._results + @results.setter + def results(self, value: Results) -> None: + self._results = value + self._results.action = self.action + self._results.operation_type = self.operation_type + """ + + def __init__(self) -> None: + self.class_name: str = self.__class__.__name__ + + self.log: logging.Logger = logging.getLogger(f"nd.{self.class_name}") + + # Task sequence tracking + self.task_sequence_number: int = 0 + + # Registered tasks (immutable after registration) + self._tasks: list[ApiCallResult] = [] + + # Current task being built (mutable) + self._current: PendingApiCall = PendingApiCall() + + # Final result (built on demand) + self._final_result: Optional[FinalResultData] = None + + msg = f"ENTERED {self.class_name}():" + self.log.debug(msg) + + def _increment_task_sequence_number(self) -> None: + """ + # Summary + + Increment a unique task sequence number. + + ## Raises + + None + """ + self.task_sequence_number += 1 + msg = f"self.task_sequence_number: {self.task_sequence_number}" + self.log.debug(msg) + + def _determine_if_changed(self) -> bool: + """ + # Summary + + Determine if the current task resulted in changes. + + This is a private helper method used during task registration. 
+ Checks operation type, check mode, explicit changed flag, + and diff content to determine if changes occurred. + + ## Raises + + None + + ## Returns + + - `bool`: True if changes occurred, False otherwise + """ + method_name: str = "_determine_if_changed" + + msg = f"{self.class_name}.{method_name}: ENTERED: " + msg += f"action={self._current.action}, " + msg += f"operation_type={self._current.operation_type}, " + msg += f"state={self._current.state}, " + msg += f"check_mode={self._current.check_mode}" + self.log.debug(msg) + + # Early exit for read-only operations + if self._current.check_mode or self._current.operation_type.is_read_only(): + msg = f"{self.class_name}.{method_name}: No changes (read-only operation)" + self.log.debug(msg) + return False + + # Check explicit changed flag in result + changed_flag = self._current.result.get("changed") + if changed_flag is not None: + msg = f"{self.class_name}.{method_name}: changed={changed_flag} (from result)" + self.log.debug(msg) + return changed_flag + + # Check if diff has content (besides sequence_number) + has_diff_content = any(key != "sequence_number" for key in self._current.diff) + + msg = f"{self.class_name}.{method_name}: changed={has_diff_content} (from diff)" + self.log.debug(msg) + return has_diff_content + + def register_api_call(self) -> None: + """ + # Summary + + Register the current task result. + + Converts `PendingApiCall` to immutable `ApiCallResult`, increments + sequence number, and aggregates changed/failed status. The current task + is then reset for the next task. + + ## Raises + + - `ValueError`: if Pydantic validation fails for task result data + - `ValueError`: if required fields are missing + + ## Description + + 1. Increment the task sequence number + 2. Build metadata from current task properties + 3. Determine if anything changed using `_determine_if_changed()` + 4. Determine if task failed based on `result["success"]` flag + 5. 
Add sequence_number to response, result, and diff + 6. Create immutable `ApiCallResult` with validation + 7. Register the task and update aggregated changed/failed sets + 8. Reset current task for next registration + """ + method_name: str = "register_api_call" + + msg = f"{self.class_name}.{method_name}: " + msg += f"ENTERED: action={self._current.action}, " + msg += f"result_current={self._current.result}" + self.log.debug(msg) + + # Increment sequence number + self._increment_task_sequence_number() + + # Build metadata from current task + metadata = { + "action": self._current.action, + "check_mode": self._current.check_mode, + "sequence_number": self.task_sequence_number, + "state": self._current.state, + } + + # Determine changed status + changed = self._determine_if_changed() + + # Determine failed status from result + success = self._current.result.get("success") + if success is True: + failed = False + elif success is False: + failed = True + else: + msg = f"{self.class_name}.{method_name}: " + msg += "result['success'] is not a boolean. " + msg += f"result={self._current.result}. " + msg += "Setting failed=False." 
+ self.log.debug(msg) + failed = False + + # Add sequence_number to response, result, diff + response = copy.deepcopy(self._current.response) + response["sequence_number"] = self.task_sequence_number + + result = copy.deepcopy(self._current.result) + result["sequence_number"] = self.task_sequence_number + + diff = copy.deepcopy(self._current.diff) + diff["sequence_number"] = self.task_sequence_number + + # Create immutable ApiCallResult with validation + try: + task_data = ApiCallResult( + sequence_number=self.task_sequence_number, + path=self._current.path, + verb=self._current.verb, + payload=copy.deepcopy(self._current.payload) if self._current.payload is not None else None, + verbosity_level=self._current.verbosity_level, + response=response, + result=result, + diff=diff, + metadata=metadata, + changed=changed, + failed=failed, + ) + except ValidationError as error: + msg = f"{self.class_name}.{method_name}: " + msg += f"Validation failed for task result: {error}" + raise ValueError(msg) from error + + # Register the task + self._tasks.append(task_data) + + # Reset current task for next task + self._current = PendingApiCall() + + # Log registration + if self.log.isEnabledFor(logging.DEBUG): + msg = f"{self.class_name}.{method_name}: " + msg += f"Registered task {self.task_sequence_number}: " + msg += f"changed={changed}, failed={failed}" + self.log.debug(msg) + + def build_final_result(self) -> None: + """ + # Summary + + Build the final result from all registered tasks. + + Creates a `FinalResultData` Pydantic model with aggregated + changed/failed status and all task data. The model is stored + internally and can be accessed via the `final_result` property. 
+ + ## Raises + + - `ValueError`: if Pydantic validation fails for final result + + ## Description + + The final result consists of the following: + + ```json + { + "changed": True, # or False + "failed": True, + "diff": { + [], + }, + "response": { + [], + }, + "result": { + [], + }, + "metadata": { + [], + } + ``` + """ + method_name: str = "build_final_result" + + msg = f"{self.class_name}.{method_name}: " + msg += f"changed={self.changed}, failed={self.failed}" + self.log.debug(msg) + + # Aggregate data from all tasks + diff_list = [task.diff for task in self._tasks] + metadata_list = [task.metadata for task in self._tasks] + path_list = [task.path for task in self._tasks] + payload_list = [task.payload for task in self._tasks] + response_list = [task.response for task in self._tasks] + result_list = [task.result for task in self._tasks] + verb_list = [task.verb for task in self._tasks] + verbosity_level_list = [task.verbosity_level for task in self._tasks] + + # Create FinalResultData with validation + try: + self._final_result = FinalResultData( + changed=True in self.changed, + failed=True in self.failed, + diff=diff_list, + metadata=metadata_list, + path=path_list, + payload=payload_list, + response=response_list, + result=result_list, + verb=verb_list, + verbosity_level=verbosity_level_list, + ) + except ValidationError as error: + msg = f"{self.class_name}.{method_name}: " + msg += f"Validation failed for final result: {error}" + raise ValueError(msg) from error + + msg = f"{self.class_name}.{method_name}: " + msg += f"Built final result: changed={self._final_result.changed}, " + msg += f"failed={self._final_result.failed}, " + msg += f"tasks={len(self._tasks)}" + self.log.debug(msg) + + @property + def final_result(self) -> dict[str, Any]: + """ + # Summary + + Return the final result as a dict for Ansible `exit_json`/`fail_json`. 
+ + ## Raises + + - `ValueError`: if `build_final_result()` hasn't been called + + ## Returns + + - `dict[str, Any]`: The final result dictionary with all aggregated data + """ + if self._final_result is None: + msg = f"{self.class_name}.final_result: " + msg += "build_final_result() must be called before accessing final_result" + raise ValueError(msg) + return self._final_result.model_dump() + + @property + def failed_result(self) -> dict[str, Any]: + """ + # Summary + + Return a result for a failed task with no changes + + ## Raises + + None + """ + result: dict = {} + result["changed"] = False + result["failed"] = True + result["diff"] = [{}] + result["response"] = [{}] + result["result"] = [{}] + return result + + @property + def ok_result(self) -> dict[str, Any]: + """ + # Summary + + Return a result for a successful task with no changes + + ## Raises + + None + """ + result: dict = {} + result["changed"] = False + result["failed"] = False + result["diff"] = [{}] + result["response"] = [{}] + result["result"] = [{}] + return result + + @property + def action(self) -> str: + """ + # Summary + + Action name for the current task. + + Used in metadata to indicate the action that was taken. + + ## Raises + + None + """ + return self._current.action + + @action.setter + def action(self, value: str) -> None: + method_name: str = "action" + if not isinstance(value, str): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a string. Got {type(value).__name__}." + raise TypeError(msg) + self._current.action = value + + @property + def operation_type(self) -> OperationType: + """ + # Summary + + The operation type for the current operation. + + Used to determine if the operation might change controller state. 
+ + ## Raises + + None + + ## Returns + + The current operation type (`OperationType` enum value) + """ + return self._current.operation_type + + @operation_type.setter + def operation_type(self, value: OperationType) -> None: + """ + # Summary + + Set the operation type for the current task. + + ## Raises + + - `TypeError`: if value is not an `OperationType` instance + + ## Parameters + + - value: The operation type to set (must be an `OperationType` enum value) + """ + method_name: str = "operation_type" + if not isinstance(value, OperationType): + msg = f"{self.class_name}.{method_name}: " + msg += "value must be an OperationType instance. " + msg += f"Got type {type(value).__name__}, value {value}." + raise TypeError(msg) + self._current.operation_type = value + + @property + def changed(self) -> set[bool]: + """ + # Summary + + Returns a set() containing boolean values indicating whether anything changed. + + Derived from the `changed` attribute of all registered `ApiCallResult` tasks. + + ## Raises + + None + + ## Returns + + - A set() of boolean values indicating whether any tasks changed + + ## See also + + - `register_api_call()` method to register tasks. + """ + return {task.changed for task in self._tasks} + + @property + def check_mode(self) -> bool: + """ + # Summary + + Ansible check_mode flag for the current task. + + - `True` if check_mode is enabled, `False` otherwise. + + ## Raises + + None + """ + return self._current.check_mode + + @check_mode.setter + def check_mode(self, value: bool) -> None: + method_name: str = "check_mode" + if not isinstance(value, bool): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a bool. Got {type(value).__name__}." + raise TypeError(msg) + self._current.check_mode = value + + @property + def diffs(self) -> list[dict[str, Any]]: + """ + # Summary + + A list of dicts representing the changes made across all registered tasks. 
+ + ## Raises + + None + + ## Returns + + - `list[dict[str, Any]]`: List of diff dictionaries from all registered tasks + """ + return [task.diff for task in self._tasks] + + @property + def diff_current(self) -> dict[str, Any]: + """ + # Summary + + A dict representing the current diff for the current task. + + ## Raises + + - setter: `TypeError` if value is not a dict + """ + return self._current.diff + + @diff_current.setter + def diff_current(self, value: dict[str, Any]) -> None: + method_name: str = "diff_current" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a dict. Got {type(value).__name__}." + raise TypeError(msg) + self._current.diff = value + + @property + def failed(self) -> set[bool]: + """ + # Summary + + A set() of boolean values indicating whether any tasks failed. + + Derived from the `failed` attribute of all registered `ApiCallResult` tasks. + + - If the set contains True, at least one task failed. + - If the set contains only False all tasks succeeded. + + ## Raises + + None + + ## See also + + - `register_api_call()` method to register tasks. + """ + return {task.failed for task in self._tasks} + + @property + def metadata(self) -> list[dict[str, Any]]: + """ + # Summary + + A list of dicts representing the metadata for all registered tasks. 
+ + ## Raises + + None + + ## Returns + + - `list[dict[str, Any]]`: List of metadata dictionaries from all registered tasks + """ + return [task.metadata for task in self._tasks] + + @property + def metadata_current(self) -> dict[str, Any]: + """ + # Summary + + Return the current metadata which is comprised of the following properties: + + - action + - check_mode + - sequence_number + - state + + ## Raises + + None + """ + value: dict[str, Any] = {} + value["action"] = self.action + value["check_mode"] = self.check_mode + value["sequence_number"] = self.task_sequence_number + value["state"] = self.state + return value + + @property + def response_current(self) -> dict[str, Any]: + """ + # Summary + + Return a `dict` containing the current response from the controller for the current task. + + ## Raises + + - setter: `TypeError` if value is not a dict + """ + return self._current.response + + @response_current.setter + def response_current(self, value: dict[str, Any]) -> None: + method_name: str = "response_current" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a dict. Got {type(value).__name__}." + raise TypeError(msg) + self._current.response = value + + @property + def responses(self) -> list[dict[str, Any]]: + """ + # Summary + + Return the response list; `list` of `dict`, where each `dict` contains a + response from the controller across all registered tasks. + + ## Raises + + None + + ## Returns + + - `list[dict[str, Any]]`: List of response dictionaries from all registered tasks + """ + return [task.response for task in self._tasks] + + @property + def results(self) -> list[dict[str, Any]]: + """ + # Summary + + A `list` of `dict`, where each `dict` contains a result across all registered tasks. 
+ + ## Raises + + None + + ## Returns + + - `list[dict[str, Any]]`: List of result dictionaries from all registered tasks + """ + return [task.result for task in self._tasks] + + @property + def result_current(self) -> dict[str, Any]: + """ + # Summary + + A `dict` representing the current result for the current task. + + ## Raises + + - setter: `TypeError` if value is not a dict + """ + return self._current.result + + @result_current.setter + def result_current(self, value: dict[str, Any]) -> None: + method_name: str = "result_current" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a dict. Got {type(value).__name__}." + raise TypeError(msg) + self._current.result = value + + @property + def state(self) -> str: + """ + # Summary + + The Ansible state for the current task. + + ## Raises + + - setter: `TypeError` if value is not a string + """ + return self._current.state + + @state.setter + def state(self, value: str) -> None: + method_name: str = "state" + if not isinstance(value, str): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a string. Got {type(value).__name__}." + raise TypeError(msg) + self._current.state = value + + @property + def path(self) -> list[str]: + """ + # Summary + + A list of API endpoint paths across all registered API calls. + + ## Raises + + None + + ## Returns + + - `list[str]`: List of path strings from all registered API calls + """ + return [task.path for task in self._tasks] + + @property + def path_current(self) -> str: + """ + # Summary + + The API endpoint path for the current task. + + ## Raises + + - setter: `TypeError` if value is not a string + """ + return self._current.path + + @path_current.setter + def path_current(self, value: str) -> None: + method_name: str = "path_current" + if not isinstance(value, str): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a string. Got {type(value).__name__}." 
+ raise TypeError(msg) + self._current.path = value + + @property + def verb(self) -> list[str]: + """ + # Summary + + A list of HTTP verbs across all registered API calls. + + ## Raises + + None + + ## Returns + + - `list[str]`: List of verb strings from all registered API calls + """ + return [task.verb for task in self._tasks] + + @property + def verb_current(self) -> HttpVerbEnum: + """ + # Summary + + The HTTP verb for the current task. + + ## Raises + + - setter: `TypeError` if value is not an `HttpVerbEnum` instance + """ + return self._current.verb + + @verb_current.setter + def verb_current(self, value: HttpVerbEnum) -> None: + method_name: str = "verb_current" + if not isinstance(value, HttpVerbEnum): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be an HttpVerbEnum instance. Got {type(value).__name__}." + raise TypeError(msg) + self._current.verb = value + + @property + def payload(self) -> list[Optional[dict[str, Any]]]: + """ + # Summary + + A list of request payloads across all registered API calls. + + ## Raises + + None + + ## Returns + + - `list[Optional[dict[str, Any]]]`: List of payload dicts (or None) from all registered API calls + """ + return [task.payload for task in self._tasks] + + @property + def payload_current(self) -> Optional[dict[str, Any]]: + """ + # Summary + + The request payload for the current task. + + ## Raises + + - setter: `TypeError` if value is not a dict or None + """ + return self._current.payload + + @payload_current.setter + def payload_current(self, value: Optional[dict[str, Any]]) -> None: + method_name: str = "payload_current" + if value is not None and not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be a dict or None. Got {type(value).__name__}." + raise TypeError(msg) + self._current.payload = value + + @property + def verbosity_level(self) -> list[int]: + """ + # Summary + + A list of verbosity levels across all registered API calls. 
+ + ## Raises + + None + + ## Returns + + - `list[int]`: List of verbosity levels from all registered API calls + """ + return [task.verbosity_level for task in self._tasks] + + @property + def verbosity_level_current(self) -> int: + """ + # Summary + + The verbosity level for the current task. + + ## Raises + + - setter: `TypeError` if value is not an int + - setter: `ValueError` if value is not in range 1-6 + """ + return self._current.verbosity_level + + @verbosity_level_current.setter + def verbosity_level_current(self, value: int) -> None: + method_name: str = "verbosity_level_current" + if isinstance(value, bool) or not isinstance(value, int): + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be an int. Got {type(value).__name__}." + raise TypeError(msg) + if value < 1 or value > 6: + msg = f"{self.class_name}.{method_name}: " + msg += f"value must be between 1 and 6. Got {value}." + raise ValueError(msg) + self._current.verbosity_level = value diff --git a/plugins/module_utils/rest/sender_nd.py b/plugins/module_utils/rest/sender_nd.py new file mode 100644 index 00000000..ae333dd0 --- /dev/null +++ b/plugins/module_utils/rest/sender_nd.py @@ -0,0 +1,322 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +Sender module conforming to SenderProtocol. + +See plugins/module_utils/protocol_sender.py for the protocol definition. 
+""" + +# isort: off +# fmt: off +from __future__ import (absolute_import, division, print_function) +from __future__ import annotations +# fmt: on +# isort: on + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import copy +import inspect +import json +import logging +from typing import Any, Optional + +from ansible.module_utils.basic import AnsibleModule # type: ignore +from ansible.module_utils.connection import Connection # type: ignore +from ansible.module_utils.connection import ConnectionError as AnsibleConnectionError +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum + + +class Sender: + """ + # Summary + + An injected dependency for `RestSend` which implements the + `sender` interface. Responses are retrieved using the Ansible HttpApi plugin. + + For the `sender` interface definition, see `plugins/module_utils/protocol_sender.py`. + + ## Raises + + - `ValueError` if: + - `ansible_module` is not set. + - `path` is not set. + - `verb` is not set. + - `TypeError` if: + - `ansible_module` is not an instance of AnsibleModule. + - `payload` is not a `dict`. + - `response` is not a `dict`. + + ## Usage + + `ansible_module` is an instance of `AnsibleModule`. + + ```python + sender = Sender() + try: + sender.ansible_module = ansible_module + rest_send = RestSend() + rest_send.sender = sender + except (TypeError, ValueError) as error: + handle_error(error) + # etc... + # See rest_send.py for RestSend() usage. 
+ ``` + """ + + def __init__( + self, + ansible_module: Optional[AnsibleModule] = None, + verb: Optional[HttpVerbEnum] = None, + path: Optional[str] = None, + payload: Optional[dict[str, Any]] = None, + ) -> None: + self.class_name = self.__class__.__name__ + + self.log = logging.getLogger(f"nd.{self.class_name}") + + self._ansible_module: Optional[AnsibleModule] = ansible_module + self._connection: Optional[Connection] = None + + self._path: Optional[str] = path + self._payload: Optional[dict[str, Any]] = payload + self._response: Optional[dict[str, Any]] = None + self._verb: Optional[HttpVerbEnum] = verb + + msg = "ENTERED Sender(): " + self.log.debug(msg) + + def _get_caller_name(self) -> str: + """ + # Summary + + Get the name of the method that called the current method. + + ## Raises + + None + + ## Returns + + - `str`: The name of the calling method + """ + return inspect.stack()[2][3] + + def commit(self) -> None: + """ + # Summary + + Send the request to the controller + + ## Raises + + - `ValueError` if there is an error with the connection to the controller. + + ## Properties read + + - `verb`: HTTP verb e.g. GET, POST, PATCH, PUT, DELETE + - `path`: HTTP path e.g. /api/v1/some_endpoint + - `payload`: Optional HTTP payload + + ## Properties written + + - `response`: raw response from the controller + """ + method_name = "commit" + caller = self._get_caller_name() + + if self._connection is None: + self._connection = Connection(self.ansible_module._socket_path) # pylint: disable=protected-access + self._connection.set_params(self.ansible_module.params) + + msg = f"{self.class_name}.{method_name}: " + msg += f"caller: {caller}. 
" + msg += "Calling Connection().send_request: " + msg += f"verb {self.verb.value}, path {self.path}" + try: + if self.payload is None: + self.log.debug(msg) + response = self._connection.send_request(self.verb.value, self.path) + else: + msg += ", payload: " + msg += f"{json.dumps(self.payload, indent=4, sort_keys=True)}" + self.log.debug(msg) + response = self._connection.send_request( + self.verb.value, + self.path, + json.dumps(self.payload), + ) + # Normalize response: if JSON parsing failed, DATA will be None + # and raw content will be in the "raw" key. Convert to consistent format. + response = self._normalize_response(response) + self.response = response + except AnsibleConnectionError as error: + msg = f"{self.class_name}.{method_name}: " + msg += f"ConnectionError occurred: {error}" + self.log.error(msg) + raise ValueError(msg) from error + except Exception as error: + msg = f"{self.class_name}.{method_name}: " + msg += f"Unexpected error occurred: {error}" + self.log.error(msg) + raise ValueError(msg) from error + + def _normalize_response(self, response: dict) -> dict: + """ + # Summary + + Normalize the HttpApi response to ensure consistent format. + + If the HttpApi plugin failed to parse the response as JSON, the + `DATA` key will be None and the raw response content will be in + the `raw` key. This method converts such responses to a consistent + format where `DATA` contains a dict with the raw content. + + ## Parameters + + - `response`: The response dict from the HttpApi plugin. + + ## Returns + + The normalized response dict. 
+ """ + if response.get("DATA") is None and response.get("raw") is not None: + response["DATA"] = {"raw_response": response.get("raw")} + # If MESSAGE is just the HTTP reason phrase, enhance it + if response.get("MESSAGE") in ("OK", None): + response["MESSAGE"] = "Response could not be parsed as JSON" + return response + + @property + def ansible_module(self) -> AnsibleModule: + """ + # Summary + + The AnsibleModule instance to use for this sender. + + ## Raises + + - `ValueError` if ansible_module is not set. + """ + if self._ansible_module is None: + msg = f"{self.class_name}.ansible_module: " + msg += "ansible_module must be set before accessing ansible_module." + raise ValueError(msg) + return self._ansible_module + + @ansible_module.setter + def ansible_module(self, value: AnsibleModule): + self._ansible_module = value + + @property + def path(self) -> str: + """ + # Summary + + Endpoint path for the REST request. + + ## Raises + + - getter: `ValueError` if `path` is not set before accessing. + + ## Example + + ``/appcenter/cisco/ndfc/api/v1/...etc...`` + """ + if self._path is None: + msg = f"{self.class_name}.path: " + msg += "path must be set before accessing path." + raise ValueError(msg) + return self._path + + @path.setter + def path(self, value: str): + self._path = value + + @property + def payload(self) -> Optional[dict[str, Any]]: + """ + # Summary + + Return the payload to send to the controller + + ## Raises + - `TypeError` if value is not a `dict`. + """ + return self._payload + + @payload.setter + def payload(self, value: dict): + method_name = "payload" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be a dict. " + msg += f"Got type {type(value).__name__}, " + msg += f"value {value}." + raise TypeError(msg) + self._payload = value + + @property + def response(self) -> dict: + """ + # Summary + + The response from the controller. 
+ + - getter: Return a deepcopy of `response` + - setter: Set `response` + + ## Raises + + - getter: `ValueError` if response is not set. + - setter: `TypeError` if value is not a `dict`. + """ + if self._response is None: + msg = f"{self.class_name}.response: " + msg += "response must be set before accessing response." + raise ValueError(msg) + return copy.deepcopy(self._response) + + @response.setter + def response(self, value: dict): + method_name = "response" + if not isinstance(value, dict): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be a dict. " + msg += f"Got type {type(value).__name__}, " + msg += f"value {value}." + raise TypeError(msg) + self._response = value + + @property + def verb(self) -> HttpVerbEnum: + """ + # Summary + + HTTP method for the REST request. + + ## Raises + + - getter: `ValueError` if verb is not set. + - setter: `TypeError` if value is not a `HttpVerbEnum`. + """ + if self._verb is None: + msg = f"{self.class_name}.verb: " + msg += "verb must be set before accessing verb." + raise ValueError(msg) + return self._verb + + @verb.setter + def verb(self, value: HttpVerbEnum): + method_name = "verb" + if value not in HttpVerbEnum.values(): + msg = f"{self.class_name}.{method_name}: " + msg += f"{method_name} must be one of {HttpVerbEnum.values()}. " + msg += f"Got {value}." 
+ raise TypeError(msg) + self._verb = value diff --git a/tests/sanity/requirements.txt b/tests/sanity/requirements.txt index 8ea87eb9..2bc68e74 100644 --- a/tests/sanity/requirements.txt +++ b/tests/sanity/requirements.txt @@ -1,4 +1,4 @@ packaging # needed for update-bundled and changelog -sphinx ; python_version >= '3.5' # docs build requires python 3+ -sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+ -straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and requires python 3+ \ No newline at end of file +sphinx +sphinx-notfound-page +straight.plugin diff --git a/tests/unit/module_utils/fixtures/fixture_data/test_rest_send.json b/tests/unit/module_utils/fixtures/fixture_data/test_rest_send.json new file mode 100644 index 00000000..88aa460a --- /dev/null +++ b/tests/unit/module_utils/fixtures/fixture_data/test_rest_send.json @@ -0,0 +1,244 @@ +{ + "TEST_NOTES": [ + "Fixture data for test_rest_send.py tests", + "Provides mock controller responses for REST operations" + ], + "test_rest_send_00100a": { + "TEST_NOTES": ["Successful GET request response"], + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/endpoint", + "MESSAGE": "OK", + "DATA": { + "status": "success", + "result": "test data" + } + }, + "test_rest_send_00110a": { + "TEST_NOTES": ["Successful POST request response"], + "RETURN_CODE": 200, + "METHOD": "POST", + "REQUEST_PATH": "/api/v1/test/create", + "MESSAGE": "Created", + "DATA": { + "id": "12345", + "status": "created" + } + }, + "test_rest_send_00120a": { + "TEST_NOTES": ["Successful PUT request response"], + "RETURN_CODE": 200, + "METHOD": "PUT", + "REQUEST_PATH": "/api/v1/test/update/12345", + "MESSAGE": "Updated", + "DATA": { + "id": "12345", + "status": "updated" + } + }, + "test_rest_send_00130a": { + "TEST_NOTES": ["Successful DELETE request response"], + "RETURN_CODE": 200, + "METHOD": "DELETE", + "REQUEST_PATH": 
"/api/v1/test/delete/12345", + "MESSAGE": "Deleted", + "DATA": { + "id": "12345", + "status": "deleted" + } + }, + "test_rest_send_00200a": { + "TEST_NOTES": ["Failed request - 404 Not Found"], + "RETURN_CODE": 404, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/notfound", + "MESSAGE": "Not Found", + "DATA": { + "error": "Resource not found" + } + }, + "test_rest_send_00210a": { + "TEST_NOTES": ["Failed request - 400 Bad Request"], + "RETURN_CODE": 400, + "METHOD": "POST", + "REQUEST_PATH": "/api/v1/test/badrequest", + "MESSAGE": "Bad Request", + "DATA": { + "error": "Invalid payload" + } + }, + "test_rest_send_00220a": { + "TEST_NOTES": ["Failed request - 500 Internal Server Error"], + "RETURN_CODE": 500, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/servererror", + "MESSAGE": "Internal Server Error", + "DATA": { + "error": "Server error occurred" + } + }, + "test_rest_send_00300a": { + "TEST_NOTES": ["First response in retry sequence - failure"], + "RETURN_CODE": 500, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/retry", + "MESSAGE": "Internal Server Error", + "DATA": { + "error": "Temporary error" + } + }, + "test_rest_send_00300b": { + "TEST_NOTES": ["Second response in retry sequence - success"], + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/retry", + "MESSAGE": "OK", + "DATA": { + "status": "success", + "result": "data after retry" + } + }, + "test_rest_send_00400a": { + "TEST_NOTES": ["GET request successful response"], + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/endpoint", + "MESSAGE": "OK", + "DATA": { + "status": "success" + } + }, + "test_rest_send_00410a": { + "TEST_NOTES": ["POST request successful response"], + "RETURN_CODE": 200, + "METHOD": "POST", + "REQUEST_PATH": "/api/v1/test/create", + "MESSAGE": "OK", + "DATA": { + "status": "created" + } + }, + "test_rest_send_00420a": { + "TEST_NOTES": ["PUT request successful response"], + "RETURN_CODE": 200, + "METHOD": "PUT", + 
"REQUEST_PATH": "/api/v1/test/update/12345", + "MESSAGE": "OK", + "DATA": { + "status": "updated" + } + }, + "test_rest_send_00430a": { + "TEST_NOTES": ["DELETE request successful response"], + "RETURN_CODE": 200, + "METHOD": "DELETE", + "REQUEST_PATH": "/api/v1/test/delete/12345", + "MESSAGE": "OK", + "DATA": { + "status": "deleted" + } + }, + "test_rest_send_00500a": { + "TEST_NOTES": ["404 Not Found response"], + "RETURN_CODE": 404, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/notfound", + "MESSAGE": "Not Found", + "DATA": { + "error": "Resource not found" + } + }, + "test_rest_send_00510a": { + "TEST_NOTES": ["400 Bad Request response"], + "RETURN_CODE": 400, + "METHOD": "POST", + "REQUEST_PATH": "/api/v1/test/badrequest", + "MESSAGE": "Bad Request", + "DATA": { + "error": "Invalid request data" + } + }, + "test_rest_send_00520a": { + "TEST_NOTES": ["500 Internal Server Error response"], + "RETURN_CODE": 500, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/servererror", + "MESSAGE": "Internal Server Error", + "DATA": { + "error": "Server error occurred" + } + }, + "test_rest_send_00600a": { + "TEST_NOTES": ["First response - 500 error for retry test"], + "RETURN_CODE": 500, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/retry", + "MESSAGE": "Internal Server Error", + "DATA": { + "error": "Temporary error" + } + }, + "test_rest_send_00600b": { + "TEST_NOTES": ["Second response - success after retry"], + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/retry", + "MESSAGE": "OK", + "DATA": { + "status": "success" + } + }, + "test_rest_send_00600c": { + "TEST_NOTES": ["Multiple sequential requests - third"], + "RETURN_CODE": 200, + "METHOD": "POST", + "REQUEST_PATH": "/api/v1/test/multi/create", + "MESSAGE": "Created", + "DATA": { + "id": 3, + "name": "third", + "status": "created" + } + }, + "test_rest_send_00700a": { + "TEST_NOTES": ["First sequential GET"], + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": 
"/api/v1/test/multi/1", + "MESSAGE": "OK", + "DATA": { + "id": 1 + } + }, + "test_rest_send_00700b": { + "TEST_NOTES": ["Second sequential GET"], + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/multi/2", + "MESSAGE": "OK", + "DATA": { + "id": 2 + } + }, + "test_rest_send_00700c": { + "TEST_NOTES": ["Third sequential POST"], + "RETURN_CODE": 200, + "METHOD": "POST", + "REQUEST_PATH": "/api/v1/test/multi/create", + "MESSAGE": "OK", + "DATA": { + "id": 3, + "status": "created" + } + }, + "test_rest_send_00900a": { + "TEST_NOTES": ["Response for deepcopy test"], + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "/api/v1/test/endpoint", + "MESSAGE": "OK", + "DATA": { + "status": "success" + } + } +} diff --git a/tests/unit/module_utils/fixtures/load_fixture.py b/tests/unit/module_utils/fixtures/load_fixture.py new file mode 100644 index 00000000..ec5a84d3 --- /dev/null +++ b/tests/unit/module_utils/fixtures/load_fixture.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Function to load test inputs from JSON files. 
+"""
+
+from __future__ import absolute_import, annotations, division, print_function
+
+__metaclass__ = type  # pylint: disable=invalid-name
+
+import json
+import os
+import sys
+
+fixture_path = os.path.join(os.path.dirname(__file__), "fixture_data")
+
+
+def load_fixture(filename):
+    """
+    load test inputs from json files
+    """
+    path = os.path.join(fixture_path, f"{filename}.json")
+
+    try:
+        with open(path, encoding="utf-8") as file_handle:
+            data = file_handle.read()
+    except IOError as exception:
+        msg = f"Exception opening test input file {filename}.json : "
+        msg += f"Exception detail: {exception}"
+        print(msg)
+        sys.exit(1)
+
+    try:
+        fixture = json.loads(data)
+    except json.JSONDecodeError as exception:
+        msg = "Exception reading JSON contents in "
+        msg += f"test input file {filename}.json : "
+        msg += f"Exception detail: {exception}"
+        print(msg)
+        sys.exit(1)
+
+    return fixture
diff --git a/tests/unit/module_utils/mock_ansible_module.py b/tests/unit/module_utils/mock_ansible_module.py
new file mode 100644
index 00000000..d58397df
--- /dev/null
+++ b/tests/unit/module_utils/mock_ansible_module.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2026, Allen Robel (@arobel)
+
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Mock AnsibleModule for unit testing.
+
+This module provides a mock implementation of Ansible's AnsibleModule
+to avoid circular import issues between sender_file.py and common_utils.py.
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + + +# Define base exception class +class AnsibleFailJson(Exception): + """ + Exception raised by MockAnsibleModule.fail_json() + """ + + +# Try to import AnsibleFailJson from ansible.netcommon if available +# This allows compatibility with tests that expect the netcommon version +try: + from ansible_collections.ansible.netcommon.tests.unit.modules.utils import AnsibleFailJson as _NetcommonFailJson + + # Use the netcommon version if available + AnsibleFailJson = _NetcommonFailJson # type: ignore[misc] +except ImportError: + # Use the local version defined above + pass + + +class MockAnsibleModule: + """ + # Summary + + Mock the AnsibleModule class for unit testing. + + ## Attributes + + - check_mode: Whether the module is running in check mode + - params: Module parameters dictionary + - argument_spec: Module argument specification + - supports_check_mode: Whether the module supports check mode + + ## Methods + + - fail_json: Raises AnsibleFailJson exception with the provided message + """ + + check_mode = False + + params = {"config": {"switches": [{"ip_address": "172.22.150.105"}]}} + argument_spec = { + "config": {"required": True, "type": "dict"}, + "state": {"default": "merged", "choices": ["merged", "deleted", "query"]}, + "check_mode": False, + } + supports_check_mode = True + + @staticmethod + def fail_json(msg, **kwargs) -> AnsibleFailJson: + """ + # Summary + + Mock the fail_json method. + + ## Parameters + + - msg: Error message + - kwargs: Additional keyword arguments (ignored) + + ## Raises + + - AnsibleFailJson: Always raised with the provided message + """ + raise AnsibleFailJson(msg) + + def public_method_for_pylint(self): + """ + # Summary + + Add one public method to appease pylint. 
+
+        ## Raises
+
+        None
+        """
diff --git a/tests/unit/module_utils/response_generator.py b/tests/unit/module_utils/response_generator.py
new file mode 100644
index 00000000..e96aad70
--- /dev/null
+++ b/tests/unit/module_utils/response_generator.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2026, Allen Robel (@arobel)
+
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Response generator for unit tests.
+"""
+
+from __future__ import absolute_import, annotations, division, print_function
+
+__metaclass__ = type  # pylint: disable=invalid-name
+
+
+class ResponseGenerator:
+    """
+    Given a coroutine which yields dictionaries, return the yielded items
+    with each call to the next property
+
+    For usage in the context of dcnm_image_policy unit tests, see:
+    test: test_image_policy_create_bulk_00037
+    file: tests/unit/modules/dcnm/dcnm_image_policy/test_image_policy_create_bulk.py
+
+    Simplified usage example below.
+
+    def responses():
+        yield {"key1": "value1"}
+        yield {"key2": "value2"}
+
+    gen = ResponseGenerator(responses())
+
+    print(gen.next)  # {"key1": "value1"}
+    print(gen.next)  # {"key2": "value2"}
+    """
+
+    def __init__(self, gen):
+        self.gen = gen
+
+    @property
+    def next(self):
+        """
+        Return the next item in the generator
+        """
+        return next(self.gen)
+
+    @property
+    def implements(self):
+        """
+        ### Summary
+        Used by Sender() classes to verify Sender().gen is a
+        response generator which implements the response_generator
+        interface.
+ """ + return "response_generator" + + def public_method_for_pylint(self): + """ + Add one public method to appease pylint + """ diff --git a/tests/unit/module_utils/sender_file.py b/tests/unit/module_utils/sender_file.py new file mode 100644 index 00000000..7060e8c0 --- /dev/null +++ b/tests/unit/module_utils/sender_file.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Sender module conforming to SenderProtocol for file-based mock responses. + +See plugins/module_utils/protocol_sender.py for the protocol definition. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import copy +import inspect +import logging +from typing import Any, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.mock_ansible_module import MockAnsibleModule +from ansible_collections.cisco.nd.tests.unit.module_utils.response_generator import ResponseGenerator + + +class Sender: + """ + # Summary + + An injected dependency for `RestSend` which implements the + `sender` interface. Responses are read from JSON files. + + ## Raises + + - `ValueError` if: + - `gen` is not set. + - `TypeError` if: + - `gen` is not an instance of ResponseGenerator() + + ## Usage + + - `gen` is an instance of `ResponseGenerator()` which yields simulated responses. + In the example below, `responses()` is a generator that yields dictionaries. + However, in practice, it would yield responses read from JSON files. + - `responses()` is a coroutine that yields controller responses. + In the example below, it yields to dictionaries. However, in + practice, it would yield responses read from JSON files. 
+ + ```python + def responses(): + yield {"key1": "value1"} + yield {"key2": "value2"} + + sender = Sender() + sender.gen = ResponseGenerator(responses()) + + try: + rest_send = RestSend() + rest_send.sender = sender + except (TypeError, ValueError) as error: + handle_error(error) + # etc... + # See rest_send.py for RestSend() usage. + ``` + """ + + def __init__(self) -> None: + self.class_name = self.__class__.__name__ + + self.log = logging.getLogger(f"nd.{self.class_name}") + + self._ansible_module: Optional[MockAnsibleModule] = None + self._gen: Optional[ResponseGenerator] = None + self._path: Optional[str] = None + self._payload: Optional[dict[str, Any]] = None + self._response: Optional[dict[str, Any]] = None + self._verb: Optional[HttpVerbEnum] = None + + self._raise_method: Optional[str] = None + self._raise_exception: Optional[BaseException] = None + + msg = "ENTERED Sender(): " + self.log.debug(msg) + + def commit(self) -> None: + """ + # Summary + + - Simulate a commit to a controller (does nothing). + - Allows to simulate exceptions for testing error handling in RestSend by setting the `raise_exception` and `raise_method` properties. + + ## Raises + + - `ValueError` if `gen` is not set. + - `self.raise_exception` if set and + `self.raise_method` == "commit" + """ + method_name = "commit" + + if self.raise_method == method_name and self.raise_exception is not None: + msg = f"{self.class_name}.{method_name}: " + msg += f"Simulated {type(self.raise_exception).__name__}." 
+ raise self.raise_exception + + caller = inspect.stack()[1][3] + msg = f"{self.class_name}.{method_name}: " + msg += f"caller {caller}" + self.log.debug(msg) + + @property + def ansible_module(self) -> Optional[MockAnsibleModule]: + """ + # Summary + + Mock ansible_module + """ + return self._ansible_module + + @ansible_module.setter + def ansible_module(self, value: Optional[MockAnsibleModule]): + self._ansible_module = value + + @property + def gen(self) -> ResponseGenerator: + """ + # Summary + + The `ResponseGenerator()` instance which yields simulated responses. + + ## Raises + + - `ValueError` if `gen` is not set. + - `TypeError` if value is not a class implementing the `response_generator` interface. + """ + if self._gen is None: + msg = f"{self.class_name}.gen: gen must be set to a class implementing the response_generator interface." + raise ValueError(msg) + return self._gen + + @gen.setter + def gen(self, value: ResponseGenerator) -> None: + method_name = inspect.stack()[0][3] + msg = f"{self.class_name}.{method_name}: " + msg += "Expected a class implementing the " + msg += "response_generator interface. " + msg += f"Got {value}." + try: + implements = value.implements + except AttributeError as error: + raise TypeError(msg) from error + if implements != "response_generator": + raise TypeError(msg) + self._gen = value + + @property + def path(self) -> str: + """ + # Summary + + Dummy path. + + ## Raises + + - getter: `ValueError` if `path` is not set before accessing. + + ## Example + + ``/appcenter/cisco/ndfc/api/v1/...etc...`` + """ + if self._path is None: + msg = f"{self.class_name}.path: path must be set before accessing." + raise ValueError(msg) + return self._path + + @path.setter + def path(self, value: str): + self._path = value + + @property + def payload(self) -> Optional[dict[str, Any]]: + """ + # Summary + + Dummy payload. 
+
+        ## Raises
+
+        None
+        """
+        return self._payload
+
+    @payload.setter
+    def payload(self, value: Optional[dict[str, Any]]):
+        self._payload = value
+
+    @property
+    def raise_exception(self) -> Optional[BaseException]:
+        """
+        # Summary
+
+        The exception to raise when calling the method specified in `raise_method`.
+
+        ## Raises
+
+        - `TypeError` if the value is neither `None` nor an instance of `BaseException`.
+
+        ## Usage
+
+        ```python
+        instance = Sender()
+        instance.raise_method = "commit"
+        instance.raise_exception = ValueError("simulated error")
+        instance.commit()  # will raise a simulated ValueError
+        ```
+
+        ## Notes
+
+        - The setter rejects values that are neither `None` nor `BaseException` instances.
+        """
+        if self._raise_exception is not None and not issubclass(type(self._raise_exception), BaseException):
+            msg = f"{self.class_name}.raise_exception: "
+            msg += "raise_exception must be a subclass of BaseException. "
+            msg += f"Got {self._raise_exception} of type {type(self._raise_exception).__name__}."
+            raise TypeError(msg)
+        return self._raise_exception
+
+    @raise_exception.setter
+    def raise_exception(self, value: Optional[BaseException]):
+        if value is not None and not issubclass(type(value), BaseException):
+            msg = f"{self.class_name}.raise_exception: "
+            msg += "raise_exception must be a subclass of BaseException. "
+            msg += f"Got {value} of type {type(value).__name__}."
+            raise TypeError(msg)
+        self._raise_exception = value
+
+    @property
+    def raise_method(self) -> Optional[str]:
+        """
+        ## Summary
+
+        The method in which to raise exception `raise_exception`.
+
+        ## Raises
+
+        None
+
+        ## Usage
+
+        See `raise_exception`.
+        """
+        return self._raise_method
+
+    @raise_method.setter
+    def raise_method(self, value: Optional[str]) -> None:
+        self._raise_method = value
+
+    @property
+    def response(self) -> dict[str, Any]:
+        """
+        # Summary
+
+        The simulated response from a file.
+
+        Returns a deepcopy to prevent mutation of the response object.
+ + ## Raises + + None + """ + return copy.deepcopy(self.gen.next) + + @property + def verb(self) -> HttpVerbEnum: + """ + # Summary + + Dummy Verb. + + ## Raises + + - `ValueError` if verb is not set. + """ + if self._verb is None: + msg = f"{self.class_name}.verb: verb must be set before accessing." + raise ValueError(msg) + return self._verb + + @verb.setter + def verb(self, value: HttpVerbEnum) -> None: + self._verb = value diff --git a/tests/unit/module_utils/test_response_handler_nd.py b/tests/unit/module_utils/test_response_handler_nd.py new file mode 100644 index 00000000..f3250dbc --- /dev/null +++ b/tests/unit/module_utils/test_response_handler_nd.py @@ -0,0 +1,1496 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for response_handler_nd.py + +Tests the ResponseHandler class for handling ND controller responses. +""" + +# pylint: disable=unused-import +# pylint: disable=redefined-outer-name +# pylint: disable=protected-access +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=invalid-name +# pylint: disable=line-too-long +# pylint: disable=too-many-lines + +from __future__ import absolute_import, annotations, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.rest.response_handler_nd import ResponseHandler +from ansible_collections.cisco.nd.plugins.module_utils.rest.response_strategies.nd_v1_strategy import NdV1Strategy +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import does_not_raise + +# ============================================================================= +# Test: ResponseHandler initialization +# 
============================================================================= + + +def test_response_handler_nd_00010(): + """ + # Summary + + Verify ResponseHandler initialization with default values. + + ## Test + + - Instance can be created + - _response defaults to None + - _result defaults to None + - _verb defaults to None + - _strategy defaults to NdV1Strategy instance + + ## Classes and Methods + + - ResponseHandler.__init__() + """ + with does_not_raise(): + instance = ResponseHandler() + assert instance._response is None + assert instance._result is None + assert instance._verb is None + assert isinstance(instance._strategy, NdV1Strategy) + + +def test_response_handler_nd_00015(): + """ + # Summary + + Verify validation_strategy getter returns the default NdV1Strategy and + setter accepts a valid strategy. + + ## Test + + - Default strategy is NdV1Strategy + - Setting a new NdV1Strategy instance is accepted + - Getter returns the newly set strategy + + ## Classes and Methods + + - ResponseHandler.validation_strategy (getter/setter) + """ + instance = ResponseHandler() + assert isinstance(instance.validation_strategy, NdV1Strategy) + + new_strategy = NdV1Strategy() + with does_not_raise(): + instance.validation_strategy = new_strategy + assert instance.validation_strategy is new_strategy + + +def test_response_handler_nd_00020(): + """ + # Summary + + Verify validation_strategy setter raises TypeError for invalid type. 
+ + ## Test + + - Setting validation_strategy to a non-strategy object raises TypeError + + ## Classes and Methods + + - ResponseHandler.validation_strategy (setter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.validation_strategy:.*Expected ResponseValidationStrategy" + with pytest.raises(TypeError, match=match): + instance.validation_strategy = "not a strategy" # type: ignore[assignment] + + +# ============================================================================= +# Test: ResponseHandler.response property +# ============================================================================= + + +def test_response_handler_nd_00100(): + """ + # Summary + + Verify response getter raises ValueError when not set. + + ## Test + + - Accessing response before setting raises ValueError + + ## Classes and Methods + + - ResponseHandler.response (getter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.response:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.response + + +def test_response_handler_nd_00110(): + """ + # Summary + + Verify response setter/getter with valid dict. + + ## Test + + - response can be set with a valid dict containing RETURN_CODE and MESSAGE + - response getter returns the set value + + ## Classes and Methods + + - ResponseHandler.response (setter/getter) + """ + instance = ResponseHandler() + response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {"key": "value"}} + with does_not_raise(): + instance.response = response + result = instance.response + assert result["RETURN_CODE"] == 200 + assert result["MESSAGE"] == "OK" + + +def test_response_handler_nd_00120(): + """ + # Summary + + Verify response setter raises TypeError for non-dict. 
+ + ## Test + + - Setting response to a non-dict raises TypeError + + ## Classes and Methods + + - ResponseHandler.response (setter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.response.*must be a dict" + with pytest.raises(TypeError, match=match): + instance.response = "not a dict" # type: ignore[assignment] + + +def test_response_handler_nd_00130(): + """ + # Summary + + Verify response setter raises ValueError when MESSAGE key is missing. + + ## Test + + - Setting response without MESSAGE raises ValueError + + ## Classes and Methods + + - ResponseHandler.response (setter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.response:.*must have a MESSAGE key" + with pytest.raises(ValueError, match=match): + instance.response = {"RETURN_CODE": 200} + + +def test_response_handler_nd_00140(): + """ + # Summary + + Verify response setter raises ValueError when RETURN_CODE key is missing. + + ## Test + + - Setting response without RETURN_CODE raises ValueError + + ## Classes and Methods + + - ResponseHandler.response (setter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.response:.*must have a RETURN_CODE key" + with pytest.raises(ValueError, match=match): + instance.response = {"MESSAGE": "OK"} + + +# ============================================================================= +# Test: ResponseHandler.verb property +# ============================================================================= + + +def test_response_handler_nd_00200(): + """ + # Summary + + Verify verb getter raises ValueError when not set. + + ## Test + + - Accessing verb before setting raises ValueError + + ## Classes and Methods + + - ResponseHandler.verb (getter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.verb is not set" + with pytest.raises(ValueError, match=match): + result = instance.verb + + +def test_response_handler_nd_00210(): + """ + # Summary + + Verify verb setter/getter with valid HttpVerbEnum. 
+ + ## Test + + - verb can be set and retrieved with HttpVerbEnum values + + ## Classes and Methods + + - ResponseHandler.verb (setter/getter) + """ + instance = ResponseHandler() + with does_not_raise(): + instance.verb = HttpVerbEnum.GET + result = instance.verb + assert result == HttpVerbEnum.GET + + with does_not_raise(): + instance.verb = HttpVerbEnum.POST + result = instance.verb + assert result == HttpVerbEnum.POST + + +# ============================================================================= +# Test: ResponseHandler.result property +# ============================================================================= + + +def test_response_handler_nd_00300(): + """ + # Summary + + Verify result getter raises ValueError when commit() not called. + + ## Test + + - Accessing result before calling commit() raises ValueError + + ## Classes and Methods + + - ResponseHandler.result (getter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.result:.*must be set before accessing.*commit" + with pytest.raises(ValueError, match=match): + result = instance.result + + +def test_response_handler_nd_00310(): + """ + # Summary + + Verify result setter raises TypeError for non-dict. + + ## Test + + - Setting result to non-dict raises TypeError + + ## Classes and Methods + + - ResponseHandler.result (setter) + """ + instance = ResponseHandler() + match = r"ResponseHandler\.result.*must be a dict" + with pytest.raises(TypeError, match=match): + instance.result = "not a dict" # type: ignore[assignment] + + +# ============================================================================= +# Test: ResponseHandler.commit() validation +# ============================================================================= + + +def test_response_handler_nd_00400(): + """ + # Summary + + Verify commit() raises ValueError when response is not set. 
+ + ## Test + + - Calling commit() without setting response raises ValueError + + ## Classes and Methods + + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.verb = HttpVerbEnum.GET + match = r"ResponseHandler\.response:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_response_handler_nd_00410(): + """ + # Summary + + Verify commit() raises ValueError when verb is not set. + + ## Test + + - Calling commit() without setting verb raises ValueError + + ## Classes and Methods + + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + match = r"ResponseHandler\.verb is not set" + with pytest.raises(ValueError, match=match): + instance.commit() + + +# ============================================================================= +# Test: ResponseHandler._handle_get_response() +# ============================================================================= + + +def test_response_handler_nd_00500(): + """ + # Summary + + Verify GET response with 200 OK. + + ## Test + + - GET with RETURN_CODE 200 sets found=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00510(): + """ + # Summary + + Verify GET response with 201 Created. 
+ + ## Test + + - GET with RETURN_CODE 201 sets found=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 201, "MESSAGE": "Created"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00520(): + """ + # Summary + + Verify GET response with 202 Accepted. + + ## Test + + - GET with RETURN_CODE 202 sets found=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 202, "MESSAGE": "Accepted"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00530(): + """ + # Summary + + Verify GET response with 204 No Content. + + ## Test + + - GET with RETURN_CODE 204 sets found=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 204, "MESSAGE": "No Content"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00535(): + """ + # Summary + + Verify GET response with 207 Multi-Status. 
+ + ## Test + + - GET with RETURN_CODE 207 sets found=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 207, "MESSAGE": "Multi-Status"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00540(): + """ + # Summary + + Verify GET response with 404 Not Found. + + ## Test + + - GET with RETURN_CODE 404 sets found=False, success=True + - 404 is treated as "not found but not an error" + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 404, "MESSAGE": "Not Found"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is False + assert instance.result["success"] is True + + +def test_response_handler_nd_00550(): + """ + # Summary + + Verify GET response with 500 Internal Server Error. + + ## Test + + - GET with RETURN_CODE 500 sets found=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 500, "MESSAGE": "Internal Server Error"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00560(): + """ + # Summary + + Verify GET response with 400 Bad Request. 
+ + ## Test + + - GET with RETURN_CODE 400 sets found=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 400, "MESSAGE": "Bad Request"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00570(): + """ + # Summary + + Verify GET response with 401 Unauthorized. + + ## Test + + - GET with RETURN_CODE 401 sets found=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 401, "MESSAGE": "Unauthorized"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00575(): + """ + # Summary + + Verify GET response with 405 Method Not Allowed. + + ## Test + + - GET with RETURN_CODE 405 sets found=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 405, "MESSAGE": "Method Not Allowed"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00580(): + """ + # Summary + + Verify GET response with 409 Conflict. 
+ + ## Test + + - GET with RETURN_CODE 409 sets found=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_get_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 409, "MESSAGE": "Conflict"} + instance.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.commit() + assert instance.result["found"] is False + assert instance.result["success"] is False + + +# ============================================================================= +# Test: ResponseHandler._handle_post_put_delete_response() +# ============================================================================= + + +def test_response_handler_nd_00600(): + """ + # Summary + + Verify POST response with 200 OK (no errors). + + ## Test + + - POST with RETURN_CODE 200 and no errors sets changed=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {"status": "created"}} + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00610(): + """ + # Summary + + Verify PUT response with 200 OK. + + ## Test + + - PUT with RETURN_CODE 200 and no errors sets changed=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {"status": "updated"}} + instance.verb = HttpVerbEnum.PUT + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00620(): + """ + # Summary + + Verify DELETE response with 200 OK. 
+ + ## Test + + - DELETE with RETURN_CODE 200 and no errors sets changed=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {}} + instance.verb = HttpVerbEnum.DELETE + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00630(): + """ + # Summary + + Verify POST response with 201 Created. + + ## Test + + - POST with RETURN_CODE 201 sets changed=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 201, "MESSAGE": "Created", "DATA": {}} + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00640(): + """ + # Summary + + Verify POST response with 202 Accepted. + + ## Test + + - POST with RETURN_CODE 202 sets changed=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 202, "MESSAGE": "Accepted", "DATA": {}} + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00650(): + """ + # Summary + + Verify DELETE response with 204 No Content. 
+ + ## Test + + - DELETE with RETURN_CODE 204 sets changed=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 204, "MESSAGE": "No Content", "DATA": {}} + instance.verb = HttpVerbEnum.DELETE + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00655(): + """ + # Summary + + Verify POST response with 207 Multi-Status. + + ## Test + + - POST with RETURN_CODE 207 and no errors sets changed=True, success=True + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 207, "MESSAGE": "Multi-Status", "DATA": {"status": "partial"}} + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is True + assert instance.result["success"] is True + + +def test_response_handler_nd_00660(): + """ + # Summary + + Verify POST response with explicit ERROR key. + + ## Test + + - Response containing ERROR key sets changed=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "ERROR": "Something went wrong", + "DATA": {}, + } + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00670(): + """ + # Summary + + Verify POST response with DATA.error (ND error format). 
+ + ## Test + + - Response with DATA containing error key sets changed=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": {"error": "ND error occurred"}, + } + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00680(): + """ + # Summary + + Verify POST response with 500 error status code. + + ## Test + + - POST with RETURN_CODE 500 sets changed=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 500, + "MESSAGE": "Internal Server Error", + "DATA": {}, + } + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00690(): + """ + # Summary + + Verify POST response with 400 Bad Request. + + ## Test + + - POST with RETURN_CODE 400 and no explicit errors sets changed=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 400, + "MESSAGE": "Bad Request", + "DATA": {}, + } + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00695(): + """ + # Summary + + Verify POST response with 405 Method Not Allowed. 
+ + ## Test + + - POST with RETURN_CODE 405 sets changed=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 405, + "MESSAGE": "Method Not Allowed", + "DATA": {}, + } + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is False + assert instance.result["success"] is False + + +def test_response_handler_nd_00705(): + """ + # Summary + + Verify POST response with 409 Conflict. + + ## Test + + - POST with RETURN_CODE 409 sets changed=False, success=False + + ## Classes and Methods + + - ResponseHandler._handle_post_put_delete_response() + - ResponseHandler.commit() + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 409, + "MESSAGE": "Conflict", + "DATA": {"reason": "resource exists"}, + } + instance.verb = HttpVerbEnum.POST + with does_not_raise(): + instance.commit() + assert instance.result["changed"] is False + assert instance.result["success"] is False + + +# ============================================================================= +# Test: ResponseHandler.error_message property +# ============================================================================= + + +def test_response_handler_nd_00700(): + """ + # Summary + + Verify error_message returns None on successful response. + + ## Test + + - error_message is None when result indicates success + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {}} + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.error_message is None + + +def test_response_handler_nd_00710(): + """ + # Summary + + Verify error_message returns None when commit() not called. 
+ + ## Test + + - error_message is None when _result is None (commit not called) + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + assert instance.error_message is None + + +def test_response_handler_nd_00720(): + """ + # Summary + + Verify error_message for raw_response format (non-JSON response). + + ## Test + + - When DATA contains raw_response key, error_message indicates non-JSON response + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 500, + "MESSAGE": "Internal Server Error", + "DATA": {"raw_response": "Error"}, + } + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.error_message is not None + assert "could not be parsed as JSON" in instance.error_message + + +def test_response_handler_nd_00730(): + """ + # Summary + + Verify error_message for code/message format. + + ## Test + + - When DATA contains code and message keys, error_message includes both + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 400, + "MESSAGE": "Bad Request", + "DATA": {"code": "INVALID_INPUT", "message": "Field X is required"}, + } + instance.verb = HttpVerbEnum.POST + instance.commit() + assert instance.error_message is not None + assert "INVALID_INPUT" in instance.error_message + assert "Field X is required" in instance.error_message + + +def test_response_handler_nd_00740(): + """ + # Summary + + Verify error_message for messages array format. 
+ + ## Test + + - When DATA contains messages array with code/severity/message, + error_message includes all three fields + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 400, + "MESSAGE": "Bad Request", + "DATA": { + "messages": [ + { + "code": "ERR_001", + "severity": "ERROR", + "message": "Validation failed", + } + ] + }, + } + instance.verb = HttpVerbEnum.POST + instance.commit() + assert instance.error_message is not None + assert "ERR_001" in instance.error_message + assert "ERROR" in instance.error_message + assert "Validation failed" in instance.error_message + + +def test_response_handler_nd_00750(): + """ + # Summary + + Verify error_message for errors array format. + + ## Test + + - When DATA contains errors array, error_message includes the first error + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 400, + "MESSAGE": "Bad Request", + "DATA": {"errors": ["First error message", "Second error message"]}, + } + instance.verb = HttpVerbEnum.POST + instance.commit() + assert instance.error_message is not None + assert "First error message" in instance.error_message + + +def test_response_handler_nd_00760(): + """ + # Summary + + Verify error_message when DATA is None (connection failure). 
+ + ## Test + + - When DATA is None, error_message includes REQUEST_PATH and MESSAGE + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 500, + "MESSAGE": "Connection refused", + "REQUEST_PATH": "/api/v1/some/endpoint", + } + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.error_message is not None + assert "Connection failed" in instance.error_message + assert "/api/v1/some/endpoint" in instance.error_message + assert "Connection refused" in instance.error_message + + +def test_response_handler_nd_00770(): + """ + # Summary + + Verify error_message with non-dict DATA. + + ## Test + + - When DATA is a non-dict value, error_message includes stringified DATA + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 500, + "MESSAGE": "Internal Server Error", + "DATA": "Unexpected string error", + } + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.error_message is not None + assert "Unexpected string error" in instance.error_message + + +def test_response_handler_nd_00780(): + """ + # Summary + + Verify error_message fallback for unknown dict format. + + ## Test + + - When DATA is a dict with no recognized error format, + error_message falls back to including RETURN_CODE + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 503, + "MESSAGE": "Service Unavailable", + "DATA": {"some_unknown_key": "some_value"}, + } + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.error_message is not None + assert "503" in instance.error_message + + +def test_response_handler_nd_00790(): + """ + # Summary + + Verify error_message returns None when result success is True. 
+ + ## Test + + - Even with error-like DATA, if result is success, error_message is None + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": {"errors": ["Some error"]}, + } + instance.verb = HttpVerbEnum.GET + instance.commit() + # For GET with 200, success is True regardless of DATA content + assert instance.result["success"] is True + assert instance.error_message is None + + +def test_response_handler_nd_00800(): + """ + # Summary + + Verify error_message for connection failure with no REQUEST_PATH. + + ## Test + + - When DATA is None and REQUEST_PATH is missing, error_message uses "unknown" + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 500, + "MESSAGE": "Connection timed out", + } + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.error_message is not None + assert "unknown" in instance.error_message + assert "Connection timed out" in instance.error_message + + +def test_response_handler_nd_00810(): + """ + # Summary + + Verify error_message for messages array with empty array. + + ## Test + + - When DATA contains an empty messages array, messages format is skipped + and fallback is used + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 400, + "MESSAGE": "Bad Request", + "DATA": {"messages": []}, + } + instance.verb = HttpVerbEnum.POST + instance.commit() + assert instance.error_message is not None + assert "400" in instance.error_message + + +def test_response_handler_nd_00820(): + """ + # Summary + + Verify error_message for errors array with empty array. 
+ + ## Test + + - When DATA contains an empty errors array, errors format is skipped + and fallback is used + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 400, + "MESSAGE": "Bad Request", + "DATA": {"errors": []}, + } + instance.verb = HttpVerbEnum.POST + instance.commit() + assert instance.error_message is not None + assert "400" in instance.error_message + + +# ============================================================================= +# Test: ResponseHandler._handle_response() routing +# ============================================================================= + + +def test_response_handler_nd_00900(): + """ + # Summary + + Verify _handle_response routes GET to _handle_get_response. + + ## Test + + - GET verb produces result with "found" key (not "changed") + + ## Classes and Methods + + - ResponseHandler._handle_response() + - ResponseHandler._handle_get_response() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + instance.verb = HttpVerbEnum.GET + instance.commit() + assert "found" in instance.result + assert "changed" not in instance.result + + +def test_response_handler_nd_00910(): + """ + # Summary + + Verify _handle_response routes POST to _handle_post_put_delete_response. + + ## Test + + - POST verb produces result with "changed" key (not "found") + + ## Classes and Methods + + - ResponseHandler._handle_response() + - ResponseHandler._handle_post_put_delete_response() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {}} + instance.verb = HttpVerbEnum.POST + instance.commit() + assert "changed" in instance.result + assert "found" not in instance.result + + +def test_response_handler_nd_00920(): + """ + # Summary + + Verify _handle_response routes PUT to _handle_post_put_delete_response. 
+ + ## Test + + - PUT verb produces result with "changed" key (not "found") + + ## Classes and Methods + + - ResponseHandler._handle_response() + - ResponseHandler._handle_post_put_delete_response() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {}} + instance.verb = HttpVerbEnum.PUT + instance.commit() + assert "changed" in instance.result + assert "found" not in instance.result + + +def test_response_handler_nd_00930(): + """ + # Summary + + Verify _handle_response routes DELETE to _handle_post_put_delete_response. + + ## Test + + - DELETE verb produces result with "changed" key (not "found") + + ## Classes and Methods + + - ResponseHandler._handle_response() + - ResponseHandler._handle_post_put_delete_response() + """ + instance = ResponseHandler() + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {}} + instance.verb = HttpVerbEnum.DELETE + instance.commit() + assert "changed" in instance.result + assert "found" not in instance.result + + +# ============================================================================= +# Test: ResponseHandler with code/message + messages array in same response +# ============================================================================= + + +def test_response_handler_nd_01000(): + """ + # Summary + + Verify error_message prefers code/message format over messages array. 
+ + ## Test + + - When DATA contains both code/message and messages array, + code/message takes priority + + ## Classes and Methods + + - ResponseHandler.error_message + """ + instance = ResponseHandler() + instance.response = { + "RETURN_CODE": 400, + "MESSAGE": "Bad Request", + "DATA": { + "code": "PRIMARY_ERROR", + "message": "Primary error message", + "messages": [ + { + "code": "SECONDARY", + "severity": "WARNING", + "message": "Secondary message", + } + ], + }, + } + instance.verb = HttpVerbEnum.POST + instance.commit() + assert instance.error_message is not None + assert "PRIMARY_ERROR" in instance.error_message + assert "Primary error message" in instance.error_message + + +# ============================================================================= +# Test: ResponseHandler commit() can be called multiple times +# ============================================================================= + + +def test_response_handler_nd_01100(): + """ + # Summary + + Verify commit() can be called with different responses. 
+ + ## Test + + - First commit with 200 success + - Second commit with 500 error + - result reflects the most recent commit + + ## Classes and Methods + + - ResponseHandler.commit() + """ + instance = ResponseHandler() + + # First commit - success + instance.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.result["success"] is True + assert instance.result["found"] is True + + # Second commit - failure + instance.response = {"RETURN_CODE": 500, "MESSAGE": "Internal Server Error"} + instance.verb = HttpVerbEnum.GET + instance.commit() + assert instance.result["success"] is False + assert instance.result["found"] is False diff --git a/tests/unit/module_utils/test_rest_send.py b/tests/unit/module_utils/test_rest_send.py new file mode 100644 index 00000000..5f5a8500 --- /dev/null +++ b/tests/unit/module_utils/test_rest_send.py @@ -0,0 +1,1551 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for rest_send.py + +Tests the RestSend class for sending REST requests with retries +""" + +# pylint: disable=disallowed-name,protected-access,too-many-lines + +from __future__ import absolute_import, annotations, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +import inspect + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.rest.response_handler_nd import ResponseHandler +from ansible_collections.cisco.nd.plugins.module_utils.rest.rest_send import RestSend +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import does_not_raise +from ansible_collections.cisco.nd.tests.unit.module_utils.fixtures.load_fixture import load_fixture +from ansible_collections.cisco.nd.tests.unit.module_utils.mock_ansible_module import 
MockAnsibleModule +from ansible_collections.cisco.nd.tests.unit.module_utils.response_generator import ResponseGenerator +from ansible_collections.cisco.nd.tests.unit.module_utils.sender_file import Sender + + +def responses_rest_send(key: str): + """ + Load fixture data for rest_send tests + """ + return load_fixture("test_rest_send")[key] + + +# ============================================================================= +# Test: RestSend initialization +# ============================================================================= + + +def test_rest_send_00010(): + """ + # Summary + + Verify RestSend initialization with default values + + ## Test + + - Instance can be created with params dict + - check_mode defaults to False + - timeout defaults to 300 + - send_interval defaults to 5 + - unit_test defaults to False + + ## Classes and Methods + + - RestSend.__init__() + """ + params = {"check_mode": False, "state": "merged"} + with does_not_raise(): + instance = RestSend(params) + assert instance.check_mode is False + assert instance.timeout == 300 + assert instance.send_interval == 5 + assert instance.unit_test is False + + +def test_rest_send_00020(): + """ + # Summary + + Verify RestSend initialization with check_mode True + + ## Test + + - check_mode can be set via params + + ## Classes and Methods + + - RestSend.__init__() + """ + params = {"check_mode": True, "state": "merged"} + with does_not_raise(): + instance = RestSend(params) + assert instance.check_mode is True + + +def test_rest_send_00030(): + """ + # Summary + + Verify RestSend raises TypeError for invalid check_mode + + ## Test + + - check_mode setter raises TypeError if not bool + + ## Classes and Methods + + - RestSend.check_mode + """ + params = {"check_mode": False} + instance = RestSend(params) + match = r"RestSend\.check_mode:.*must be a boolean" + with pytest.raises(TypeError, match=match): + instance.check_mode = "invalid" # type: ignore[assignment] + + +# 
============================================================================= +# Test: RestSend property setters/getters +# ============================================================================= + + +def test_rest_send_00100(): + """ + # Summary + + Verify path property getter/setter + + ## Test + + - path can be set and retrieved + - ValueError raised if accessed before being set + + ## Classes and Methods + + - RestSend.path + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Test ValueError when accessing before setting + match = r"RestSend\.path:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.path # pylint: disable=pointless-statement + + # Test setter/getter + with does_not_raise(): + instance.path = "/api/v1/test/endpoint" + result = instance.path + assert result == "/api/v1/test/endpoint" + + +def test_rest_send_00110(): + """ + # Summary + + Verify verb property getter/setter + + ## Test + + - verb can be set and retrieved with HttpVerbEnum + - verb has default value of HttpVerbEnum.GET + - TypeError raised if not HttpVerbEnum + + ## Classes and Methods + + - RestSend.verb + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Test default value + with does_not_raise(): + result = instance.verb + assert result == HttpVerbEnum.GET + + # Test TypeError for invalid type + match = r"RestSend\.verb:.*must be an instance of HttpVerbEnum" + with pytest.raises(TypeError, match=match): + instance.verb = "GET" # type: ignore[assignment] + + # Test setter/getter with valid HttpVerbEnum + with does_not_raise(): + instance.verb = HttpVerbEnum.POST + result = instance.verb + assert result == HttpVerbEnum.POST + + +def test_rest_send_00120(): + """ + # Summary + + Verify payload property getter/setter + + ## Test + + - payload can be set and retrieved + - payload defaults to None + - TypeError raised if not dict + + ## Classes and Methods + + - RestSend.payload + """ + 
params = {"check_mode": False} + instance = RestSend(params) + + # Test default value + with does_not_raise(): + result = instance.payload + assert result is None + + # Test TypeError for invalid type + match = r"RestSend\.payload:.*must be a dict" + with pytest.raises(TypeError, match=match): + instance.payload = "invalid" # type: ignore[assignment] + + # Test setter/getter with dict + with does_not_raise(): + instance.payload = {"key": "value"} + result = instance.payload + assert result == {"key": "value"} + + +def test_rest_send_00130(): + """ + # Summary + + Verify timeout property getter/setter + + ## Test + + - timeout can be set and retrieved + - timeout defaults to 300 + - TypeError raised if not int + + ## Classes and Methods + + - RestSend.timeout + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Test default value + assert instance.timeout == 300 + + # Test TypeError for boolean (bool is subclass of int) + match = r"RestSend\.timeout:.*must be an integer" + with pytest.raises(TypeError, match=match): + instance.timeout = True # type: ignore[assignment] + + # Test TypeError for string + with pytest.raises(TypeError, match=match): + instance.timeout = "300" # type: ignore[assignment] + + # Test setter/getter with int + with does_not_raise(): + instance.timeout = 600 + assert instance.timeout == 600 + + +def test_rest_send_00140(): + """ + # Summary + + Verify send_interval property getter/setter + + ## Test + + - send_interval can be set and retrieved + - send_interval defaults to 5 + - TypeError raised if not int + + ## Classes and Methods + + - RestSend.send_interval + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Test default value + assert instance.send_interval == 5 + + # Test TypeError for boolean + match = r"RestSend\.send_interval:.*must be an integer" + with pytest.raises(TypeError, match=match): + instance.send_interval = False # type: ignore[assignment] + + # Test setter/getter with int + with 
does_not_raise(): + instance.send_interval = 10 + assert instance.send_interval == 10 + + +def test_rest_send_00150(): + """ + # Summary + + Verify unit_test property getter/setter + + ## Test + + - unit_test can be set and retrieved + - unit_test defaults to False + - TypeError raised if not bool + + ## Classes and Methods + + - RestSend.unit_test + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Test default value + assert instance.unit_test is False + + # Test TypeError for non-bool + match = r"RestSend\.unit_test:.*must be a boolean" + with pytest.raises(TypeError, match=match): + instance.unit_test = "true" # type: ignore[assignment] + + # Test setter/getter with bool + with does_not_raise(): + instance.unit_test = True + assert instance.unit_test is True + + +def test_rest_send_00160(): + """ + # Summary + + Verify sender property getter/setter + + ## Test + + - sender must be set before accessing + - sender must implement SenderProtocol + - ValueError raised if accessed before being set + - TypeError raised if not SenderProtocol + + ## Classes and Methods + + - RestSend.sender + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Test ValueError when accessing before setting + match = r"RestSend\.sender:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.sender # pylint: disable=pointless-statement + + # Test TypeError for invalid type + match = r"RestSend\.sender:.*must implement SenderProtocol" + with pytest.raises(TypeError, match=match): + instance.sender = "invalid" # type: ignore[assignment] + + # Test setter/getter with valid Sender + def responses(): + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + with does_not_raise(): + instance.sender = sender + result = instance.sender + assert result is sender + + +def test_rest_send_00170(): + 
""" + # Summary + + Verify response_handler property getter/setter + + ## Test + + - response_handler must be set before accessing + - response_handler must implement ResponseHandlerProtocol + - ValueError raised if accessed before being set + - TypeError raised if not ResponseHandlerProtocol + + ## Classes and Methods + + - RestSend.response_handler + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Test ValueError when accessing before setting + match = r"RestSend\.response_handler:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.response_handler # pylint: disable=pointless-statement + + # Test TypeError for invalid type + match = r"RestSend\.response_handler:.*must implement ResponseHandlerProtocol" + with pytest.raises(TypeError, match=match): + instance.response_handler = "invalid" # type: ignore[assignment] + + # Test setter/getter with valid ResponseHandler + def responses(): + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + instance.sender = sender + + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + with does_not_raise(): + instance.response_handler = response_handler + result = instance.response_handler + assert result is response_handler + + +# ============================================================================= +# Test: RestSend save_settings() and restore_settings() +# ============================================================================= + + +def test_rest_send_00200(): + """ + # Summary + + Verify save_settings() and restore_settings() + + ## Test + + - save_settings() saves current check_mode and timeout + - restore_settings() restores saved values + + ## Classes and Methods + + - RestSend.save_settings() + - 
RestSend.restore_settings() + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Set initial values + instance.check_mode = False + instance.timeout = 300 + + # Save settings + with does_not_raise(): + instance.save_settings() + + # Modify values + instance.check_mode = True + instance.timeout = 600 + + # Verify modified values + assert instance.check_mode is True + assert instance.timeout == 600 + + # Restore settings + with does_not_raise(): + instance.restore_settings() + + # Verify restored values + assert instance.check_mode is False + assert instance.timeout == 300 + + +def test_rest_send_00210(): + """ + # Summary + + Verify restore_settings() when save_settings() not called + + ## Test + + - restore_settings() does nothing if save_settings() not called + + ## Classes and Methods + + - RestSend.restore_settings() + """ + params = {"check_mode": False} + instance = RestSend(params) + + # Set values without saving + instance.check_mode = True + instance.timeout = 600 + + # Call restore_settings without prior save + with does_not_raise(): + instance.restore_settings() + + # Values should remain unchanged + assert instance.check_mode is True + assert instance.timeout == 600 + + +# ============================================================================= +# Test: RestSend commit() in check mode +# ============================================================================= + + +def test_rest_send_00300(): + """ + # Summary + + Verify commit() in check_mode for GET request + + ## Test + + - GET requests in check_mode return simulated success response + - response_current contains check mode indicator + - result_current shows success + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_check_mode() + """ + params = {"check_mode": True} + + def responses(): + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + 
sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.path = "/api/v1/test/checkmode" + instance.verb = HttpVerbEnum.GET + instance.commit() + + # Verify check mode response + assert instance.response_current["RETURN_CODE"] == 200 + assert instance.response_current["METHOD"] == HttpVerbEnum.GET + assert instance.response_current["REQUEST_PATH"] == "/api/v1/test/checkmode" + assert instance.response_current["CHECK_MODE"] is True + assert instance.result_current["success"] is True + assert instance.result_current["found"] is True + + +def test_rest_send_00310(): + """ + # Summary + + Verify commit() in check_mode for POST request + + ## Test + + - POST requests in check_mode return simulated success response + - changed flag is True for write operations + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_check_mode() + """ + params = {"check_mode": True} + + def responses(): + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {}} + response_handler.verb = HttpVerbEnum.POST + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.path = "/api/v1/test/create" + instance.verb = HttpVerbEnum.POST + instance.payload = {"name": "test"} + instance.commit() + + # Verify check mode response 
for write operation + assert instance.response_current["RETURN_CODE"] == 200 + assert instance.response_current["METHOD"] == HttpVerbEnum.POST + assert instance.response_current["CHECK_MODE"] is True + assert instance.result_current["success"] is True + assert instance.result_current["changed"] is True + + +# ============================================================================= +# Test: RestSend commit() in normal mode with successful responses +# ============================================================================= + + +def test_rest_send_00400(): + """ + # Summary + + Verify commit() with successful GET request + + ## Test + + - GET request returns successful response + - response_current and result_current are populated + - response and result lists contain the responses + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide an extra response entry for potential retry scenarios + yield responses_rest_send(key) + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.path = "/api/v1/test/endpoint" + instance.verb = HttpVerbEnum.GET + instance.commit() + + # Verify response + assert instance.response_current["RETURN_CODE"] == 200 + assert instance.response_current["METHOD"] == "GET" + assert instance.response_current["DATA"]["status"] == "success" + + # 
Verify result (GET requests return "found", not "changed") + assert instance.result_current["success"] is True + assert instance.result_current["found"] is True + + # Verify response and result lists + assert len(instance.responses) == 1 + assert len(instance.results) == 1 + + +def test_rest_send_00410(): + """ + # Summary + + Verify commit() with successful POST request + + ## Test + + - POST request with payload returns successful response + - changed flag is True for write operations + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide an extra response entry for potential retry scenarios + yield responses_rest_send(key) + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.path = "/api/v1/test/create" + instance.verb = HttpVerbEnum.POST + instance.payload = {"name": "test"} + instance.commit() + + # Verify response + assert instance.response_current["RETURN_CODE"] == 200 + assert instance.response_current["DATA"]["status"] == "created" + + # Verify result + assert instance.result_current["success"] is True + assert instance.result_current["changed"] is True + + +def test_rest_send_00420(): + """ + # Summary + + Verify commit() with successful PUT request + + ## Test + + - PUT request returns successful response + + ## Classes and Methods + + - RestSend.commit() + - 
RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide an extra response entry for potential retry scenarios + yield responses_rest_send(key) + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.path = "/api/v1/test/update/12345" + instance.verb = HttpVerbEnum.PUT + instance.payload = {"status": "updated"} + instance.commit() + + # Verify response + assert instance.response_current["RETURN_CODE"] == 200 + assert instance.response_current["DATA"]["status"] == "updated" + + # Verify result + assert instance.result_current["success"] is True + assert instance.result_current["changed"] is True + + +def test_rest_send_00430(): + """ + # Summary + + Verify commit() with successful DELETE request + + ## Test + + - DELETE request returns successful response + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide an extra response entry for potential retry scenarios + yield responses_rest_send(key) + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + 
instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.path = "/api/v1/test/delete/12345" + instance.verb = HttpVerbEnum.DELETE + instance.commit() + + # Verify response + assert instance.response_current["RETURN_CODE"] == 200 + assert instance.response_current["DATA"]["status"] == "deleted" + + # Verify result + assert instance.result_current["success"] is True + assert instance.result_current["changed"] is True + + +# ============================================================================= +# Test: RestSend commit() with failed responses +# ============================================================================= + + +def test_rest_send_00500(): + """ + # Summary + + Verify commit() with 404 Not Found response + + ## Test + + - Failed GET request returns 404 response + - result shows success=False + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide an extra response entry for potential retry scenarios + yield responses_rest_send(key) + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.timeout = 1 + 
instance.path = "/api/v1/test/notfound" + instance.verb = HttpVerbEnum.GET + instance.commit() + + # Verify error response (GET with 404 returns "found": False) + assert instance.response_current["RETURN_CODE"] == 404 + assert instance.result_current["success"] is True + assert instance.result_current["found"] is False + + +def test_rest_send_00510(): + """ + # Summary + + Verify commit() with 400 Bad Request response + + ## Test + + - Failed POST request returns 400 response + - Loop retries until timeout is exhausted + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide responses for multiple retry attempts (60 retries * 5 second interval = 300 seconds) + for _ in range(60): + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.timeout = 10 + instance.send_interval = 5 + instance.path = "/api/v1/test/badrequest" + instance.verb = HttpVerbEnum.POST + instance.payload = {"invalid": "data"} + instance.commit() + + # Verify error response + assert instance.response_current["RETURN_CODE"] == 400 + assert instance.result_current["success"] is False + + +def test_rest_send_00520(): + """ + # Summary + + Verify commit() with 500 Internal Server Error response + + ## Test + + - Failed GET request returns 500 response + - Loop retries until timeout is exhausted + + ## Classes and 
Methods + + - RestSend.commit() + - RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide responses for multiple retry attempts (60 retries * 5 second interval = 300 seconds) + for _ in range(60): + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.timeout = 10 + instance.send_interval = 5 + instance.path = "/api/v1/test/servererror" + instance.verb = HttpVerbEnum.GET + instance.commit() + + # Verify error response + assert instance.response_current["RETURN_CODE"] == 500 + assert instance.result_current["success"] is False + + +# ============================================================================= +# Test: RestSend commit() with retry logic +# ============================================================================= + + +def test_rest_send_00600(): + """ + # Summary + + Verify commit() retries on failure then succeeds + + ## Test + + - First response is 500 error + - Second response is 200 success + - Final result is success + + ## Classes and Methods + + - RestSend.commit() + - RestSend._commit_normal_mode() + """ + method_name = inspect.stack()[0][3] + + def responses(): + # Retry test sequence: error then success + yield responses_rest_send(f"{method_name}a") + yield responses_rest_send(f"{method_name}a") + yield responses_rest_send(f"{method_name}b") + + gen_responses = ResponseGenerator(responses()) + + 
params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + with does_not_raise(): + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.timeout = 10 + instance.send_interval = 1 + instance.path = "/api/v1/test/retry" + instance.verb = HttpVerbEnum.GET + instance.commit() + + # Verify final successful response + assert instance.response_current["RETURN_CODE"] == 200 + assert instance.response_current["DATA"]["status"] == "success" + assert instance.result_current["success"] is True + + +# ============================================================================= +# Test: RestSend multiple sequential commits +# ============================================================================= + + +def test_rest_send_00700(): + """ + # Summary + + Verify multiple sequential commit() calls + + ## Test + + - Multiple commits append to response and result lists + - Each commit populates response_current and result_current + + ## Classes and Methods + + - RestSend.commit() + """ + method_name = inspect.stack()[0][3] + + def responses(): + # 3 sequential commits + yield responses_rest_send(f"{method_name}a") + yield responses_rest_send(f"{method_name}b") + yield responses_rest_send(f"{method_name}c") + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = 
{"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + + # First commit - GET + with does_not_raise(): + instance.path = "/api/v1/test/multi/1" + instance.verb = HttpVerbEnum.GET + instance.commit() + + assert instance.response_current["DATA"]["id"] == 1 + assert len(instance.responses) == 1 + assert len(instance.results) == 1 + + # Second commit - GET + with does_not_raise(): + instance.path = "/api/v1/test/multi/2" + instance.verb = HttpVerbEnum.GET + instance.commit() + + assert instance.response_current["DATA"]["id"] == 2 + assert len(instance.responses) == 2 + assert len(instance.results) == 2 + + # Third commit - POST + with does_not_raise(): + instance.path = "/api/v1/test/multi/create" + instance.verb = HttpVerbEnum.POST + instance.payload = {"name": "third"} + instance.commit() + + assert instance.response_current["DATA"]["id"] == 3 + assert instance.response_current["DATA"]["status"] == "created" + assert len(instance.responses) == 3 + assert len(instance.results) == 3 + + +# ============================================================================= +# Test: RestSend error conditions +# ============================================================================= + + +def test_rest_send_00800(): + """ + # Summary + + Verify commit() raises ValueError when path not set + + ## Test + + - commit() raises ValueError if path not set + + ## Classes and Methods + + - RestSend.commit() + """ + params = {"check_mode": False} + + def responses(): + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + 
response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.verb = HttpVerbEnum.GET + + # Don't set path - should raise ValueError + match = r"RestSend\.path:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_rest_send_00810(): + """ + # Summary + + Verify commit() raises ValueError when verb not set + + ## Test + + - commit() raises ValueError if verb not set + + ## Classes and Methods + + - RestSend.commit() + """ + params = {"check_mode": False} + + def responses(): + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.path = "/api/v1/test" + + # Reset verb to None to test ValueError + instance._verb = None # type: ignore[assignment] + + match = r"RestSend\.verb:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_rest_send_00820(): + """ + # Summary + + Verify commit() raises ValueError when sender not set + + ## Test + + - commit() raises ValueError if sender not set + + ## Classes and Methods + + - RestSend.commit() + """ + params = {"check_mode": False} + + instance = RestSend(params) + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.path = "/api/v1/test" + instance.verb = HttpVerbEnum.GET + + # Don't set sender - should raise ValueError 
+ match = r"RestSend\.sender:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + instance.commit() + + +def test_rest_send_00830(): + """ + # Summary + + Verify commit() raises ValueError when response_handler not set + + ## Test + + - commit() raises ValueError if response_handler not set + + ## Classes and Methods + + - RestSend.commit() + """ + params = {"check_mode": False} + + def responses(): + # Stub responses (not consumed in this test) + yield {} + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + instance = RestSend(params) + instance.sender = sender + instance.path = "/api/v1/test" + instance.verb = HttpVerbEnum.GET + + # Don't set response_handler - should raise ValueError + match = r"RestSend\.response_handler:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + instance.commit() + + +# ============================================================================= +# Test: RestSend response and result properties +# ============================================================================= + + +def test_rest_send_00900(): + """ + # Summary + + Verify response and result properties return copies + + ## Test + + - response returns deepcopy of response list + - result returns deepcopy of result list + - Modifying returned values doesn't affect internal state + + ## Classes and Methods + + - RestSend.response + - RestSend.result + - RestSend.response_current + - RestSend.result_current + """ + method_name = inspect.stack()[0][3] + key = f"{method_name}a" + + def responses(): + # Provide an extra response entry for potential retry scenarios + yield responses_rest_send(key) + yield responses_rest_send(key) + + gen_responses = ResponseGenerator(responses()) + + params = {"check_mode": False} + sender = Sender() + 
sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.unit_test = True + instance.path = "/api/v1/test/endpoint" + instance.verb = HttpVerbEnum.GET + instance.commit() + + # Get response and result + response_copy = instance.responses + result_copy = instance.results + response_current_copy = instance.response_current + result_current_copy = instance.result_current + + # Modify copies + response_copy[0]["MODIFIED"] = True + result_copy[0]["MODIFIED"] = True + response_current_copy["MODIFIED"] = True + result_current_copy["MODIFIED"] = True + + # Verify original values unchanged + assert "MODIFIED" not in instance._response[0] + assert "MODIFIED" not in instance._result[0] + assert "MODIFIED" not in instance._response_current + assert "MODIFIED" not in instance._result_current + + +def test_rest_send_00910(): + """ + # Summary + + Verify failed_result property + + ## Test + + - failed_result returns a failure dict with changed=False + + ## Classes and Methods + + - RestSend.failed_result + """ + params = {"check_mode": False} + instance = RestSend(params) + + with does_not_raise(): + result = instance.failed_result + + assert result["failed"] is True + assert result["changed"] is False + + +# ============================================================================= +# Test: RestSend with sender exception simulation +# ============================================================================= + + +def test_rest_send_01000(): + """ + # Summary + + Verify commit() handles sender exceptions + + ## Test + + - Sender.commit() can raise exceptions + - RestSend.commit() propagates the exception + 
+ ## Classes and Methods + + - RestSend.commit() + - Sender.commit() + - Sender.raise_exception + - Sender.raise_method + """ + params = {"check_mode": False} + + def responses(): + yield {} + + gen_responses = ResponseGenerator(responses()) + sender = Sender() + sender.ansible_module = MockAnsibleModule() + sender.gen = gen_responses + sender.path = "/api/v1/test" + sender.verb = HttpVerbEnum.GET + + # Configure sender to raise exception + sender.raise_method = "commit" + sender.raise_exception = ValueError("Simulated sender error") + + instance = RestSend(params) + instance.sender = sender + response_handler = ResponseHandler() + response_handler.response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + response_handler.verb = HttpVerbEnum.GET + response_handler.commit() + instance.response_handler = response_handler + instance.path = "/api/v1/test" + instance.verb = HttpVerbEnum.GET + + # commit() should raise ValueError + match = r"Simulated sender error" + with pytest.raises(ValueError, match=match): + instance.commit() + + +# ============================================================================= +# Test: RestSend.add_response() +# ============================================================================= + + +def test_rest_send_add_response_success(): + """ + # Summary + + Verify add_response() appends a valid dict to the response list. + + ## Test + + - add_response() with a valid dict appends to the response list + + ## Classes and Methods + + - RestSend.add_response + """ + params = {"check_mode": False} + instance = RestSend(params) + + with does_not_raise(): + instance.add_response({"RETURN_CODE": 200}) + instance.add_response({"RETURN_CODE": 404}) + + assert len(instance.responses) == 2 + assert instance.responses[0] == {"RETURN_CODE": 200} + assert instance.responses[1] == {"RETURN_CODE": 404} + + +def test_rest_send_add_response_type_error(): + """ + # Summary + + Verify add_response() raises TypeError for non-dict value. 
+ + ## Test + + - add_response() raises TypeError if value is not a dict + + ## Classes and Methods + + - RestSend.add_response + """ + params = {"check_mode": False} + instance = RestSend(params) + + match = r"RestSend\.add_response:.*value must be a dict" + with pytest.raises(TypeError, match=match): + instance.add_response("invalid") # type: ignore[arg-type] + + +# ============================================================================= +# Test: RestSend.add_result() +# ============================================================================= + + +def test_rest_send_add_result_success(): + """ + # Summary + + Verify add_result() appends a valid dict to the result list. + + ## Test + + - add_result() with a valid dict appends to the result list + + ## Classes and Methods + + - RestSend.add_result + """ + params = {"check_mode": False} + instance = RestSend(params) + + with does_not_raise(): + instance.add_result({"changed": True}) + instance.add_result({"changed": False}) + + assert len(instance.results) == 2 + assert instance.results[0] == {"changed": True} + assert instance.results[1] == {"changed": False} + + +def test_rest_send_add_result_type_error(): + """ + # Summary + + Verify add_result() raises TypeError for non-dict value. 
+ + ## Test + + - add_result() raises TypeError if value is not a dict + + ## Classes and Methods + + - RestSend.add_result + """ + params = {"check_mode": False} + instance = RestSend(params) + + match = r"RestSend\.add_result:.*value must be a dict" + with pytest.raises(TypeError, match=match): + instance.add_result("invalid") # type: ignore[arg-type] diff --git a/tests/unit/module_utils/test_results.py b/tests/unit/module_utils/test_results.py new file mode 100644 index 00000000..a7d3609f --- /dev/null +++ b/tests/unit/module_utils/test_results.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for results.py + +Tests the Results class and its Pydantic models for collecting and aggregating +API call results. +""" + +# pylint: disable=protected-access + +from __future__ import absolute_import, annotations, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum, OperationType +from ansible_collections.cisco.nd.plugins.module_utils.rest.results import ( + ApiCallResult, + PendingApiCall, + Results, +) + +# ============================================================================= +# Helper: register a task with all fields populated +# ============================================================================= + + +def _register_task(results, path="/api/v1/test", verb=HttpVerbEnum.POST, payload=None, verbosity_level=3): + """Register a single task with the given request-side fields.""" + results.path_current = path + results.verb_current = verb + results.payload_current = payload + results.verbosity_level_current = verbosity_level + results.action = "test_action" + results.state = "merged" + results.check_mode = False + results.operation_type = OperationType.DELETE + 
results.response_current = {"RETURN_CODE": 200, "MESSAGE": "OK"} + results.result_current = {"success": True, "changed": True} + results.diff_current = {"before": {}, "after": {"foo": "bar"}} + results.register_api_call() + + +# ============================================================================= +# Test: PendingApiCall new fields +# ============================================================================= + + +class TestPendingApiCallNewFields: + """Tests for the new fields on PendingApiCall.""" + + def test_defaults(self): + """New fields have correct defaults.""" + pending = PendingApiCall() + assert pending.path == "" + assert pending.verb == HttpVerbEnum.GET + assert pending.payload is None + assert pending.verbosity_level == 3 + + def test_explicit_values(self): + """New fields accept explicit values.""" + pending = PendingApiCall( + path="/api/v1/fabrics", + verb=HttpVerbEnum.DELETE, + payload={"name": "FABRIC_1"}, + verbosity_level=5, + ) + assert pending.path == "/api/v1/fabrics" + assert pending.verb == HttpVerbEnum.DELETE + assert pending.payload == {"name": "FABRIC_1"} + assert pending.verbosity_level == 5 + + def test_verbosity_level_min_boundary(self): + """verbosity_level rejects values below 1.""" + with pytest.raises(Exception): + PendingApiCall(verbosity_level=0) + + def test_verbosity_level_max_boundary(self): + """verbosity_level rejects values above 6.""" + with pytest.raises(Exception): + PendingApiCall(verbosity_level=7) + + def test_verbosity_level_valid_boundaries(self): + """verbosity_level accepts boundary values 1 and 6.""" + p1 = PendingApiCall(verbosity_level=1) + p6 = PendingApiCall(verbosity_level=6) + assert p1.verbosity_level == 1 + assert p6.verbosity_level == 6 + + +# ============================================================================= +# Test: ApiCallResult new fields +# ============================================================================= + + +class TestApiCallResultNewFields: + """Tests for the 
new fields on ApiCallResult.""" + + @staticmethod + def _make_result(**overrides): + """Create an ApiCallResult with sensible defaults, allowing overrides.""" + defaults = { + "sequence_number": 1, + "path": "/api/v1/test", + "verb": "POST", + "payload": None, + "verbosity_level": 3, + "response": {"RETURN_CODE": 200}, + "result": {"success": True}, + "diff": {}, + "metadata": {"action": "test"}, + "changed": False, + "failed": False, + } + defaults.update(overrides) + return ApiCallResult(**defaults) + + def test_stores_request_fields(self): + """ApiCallResult stores path, verb, payload, verbosity_level.""" + task = self._make_result( + path="/api/v1/fabrics", + verb="DELETE", + payload={"name": "FAB1"}, + verbosity_level=5, + ) + assert task.path == "/api/v1/fabrics" + assert task.verb == "DELETE" + assert task.payload == {"name": "FAB1"} + assert task.verbosity_level == 5 + + def test_verb_validator_coerces_enum(self): + """field_validator coerces HttpVerbEnum to string.""" + task = self._make_result(verb=HttpVerbEnum.PUT) + assert task.verb == "PUT" + assert isinstance(task.verb, str) + + def test_verb_validator_passes_string(self): + """field_validator passes plain strings through.""" + task = self._make_result(verb="GET") + assert task.verb == "GET" + + def test_payload_none_allowed(self): + """payload=None is valid (e.g. 
for GET requests).""" + task = self._make_result(payload=None) + assert task.payload is None + + def test_verbosity_level_rejects_out_of_range(self): + """verbosity_level outside 1-6 raises ValidationError.""" + with pytest.raises(Exception): + self._make_result(verbosity_level=0) + with pytest.raises(Exception): + self._make_result(verbosity_level=7) + + def test_frozen(self): + """ApiCallResult is immutable.""" + task = self._make_result() + with pytest.raises(Exception): + task.path = "/new/path" + + +# ============================================================================= +# Test: Results current-task properties (getters/setters) +# ============================================================================= + + +class TestResultsCurrentProperties: + """Tests for path_current, verb_current, payload_current, verbosity_level_current.""" + + def test_path_current_get_set(self): + """path_current getter/setter works.""" + r = Results() + assert r.path_current == "" + r.path_current = "/api/v1/foo" + assert r.path_current == "/api/v1/foo" + + def test_path_current_type_error(self): + """path_current setter rejects non-string.""" + r = Results() + with pytest.raises(TypeError, match="value must be a string"): + r.path_current = 123 + + def test_verb_current_get_set(self): + """verb_current getter/setter works.""" + r = Results() + assert r.verb_current == HttpVerbEnum.GET + r.verb_current = HttpVerbEnum.POST + assert r.verb_current == HttpVerbEnum.POST + + def test_verb_current_type_error(self): + """verb_current setter rejects non-HttpVerbEnum.""" + r = Results() + with pytest.raises(TypeError, match="value must be an HttpVerbEnum"): + r.verb_current = "POST" + + def test_payload_current_get_set(self): + """payload_current getter/setter works with dict and None.""" + r = Results() + assert r.payload_current is None + r.payload_current = {"key": "val"} + assert r.payload_current == {"key": "val"} + r.payload_current = None + assert r.payload_current is None + 
+ def test_payload_current_type_error(self): + """payload_current setter rejects non-dict/non-None.""" + r = Results() + with pytest.raises(TypeError, match="value must be a dict or None"): + r.payload_current = "not a dict" + + def test_verbosity_level_current_get_set(self): + """verbosity_level_current getter/setter works.""" + r = Results() + assert r.verbosity_level_current == 3 + r.verbosity_level_current = 5 + assert r.verbosity_level_current == 5 + + def test_verbosity_level_current_type_error(self): + """verbosity_level_current setter rejects non-int.""" + r = Results() + with pytest.raises(TypeError, match="value must be an int"): + r.verbosity_level_current = "high" + + def test_verbosity_level_current_type_error_bool(self): + """verbosity_level_current setter rejects bool (isinstance(True, int) is True).""" + r = Results() + with pytest.raises(TypeError, match="value must be an int"): + r.verbosity_level_current = True + + def test_verbosity_level_current_value_error_low(self): + """verbosity_level_current setter rejects value < 1.""" + r = Results() + with pytest.raises(ValueError, match="value must be between 1 and 6"): + r.verbosity_level_current = 0 + + def test_verbosity_level_current_value_error_high(self): + """verbosity_level_current setter rejects value > 6.""" + r = Results() + with pytest.raises(ValueError, match="value must be between 1 and 6"): + r.verbosity_level_current = 7 + + +# ============================================================================= +# Test: register_api_call captures new fields +# ============================================================================= + + +class TestRegisterApiCallNewFields: + """Tests that register_api_call() captures the new request-side fields.""" + + def test_captures_all_new_fields(self): + """register_api_call stores path, verb, payload, verbosity_level on the task.""" + r = Results() + payload = {"fabric": "FAB1"} + _register_task(r, path="/api/v1/fabrics", verb=HttpVerbEnum.POST, 
payload=payload, verbosity_level=4) + + assert len(r._tasks) == 1 + task = r._tasks[0] + assert task.path == "/api/v1/fabrics" + assert task.verb == "POST" # coerced from enum + assert task.payload == {"fabric": "FAB1"} + assert task.verbosity_level == 4 + + def test_captures_none_payload(self): + """register_api_call handles None payload correctly.""" + r = Results() + _register_task(r, payload=None) + assert r._tasks[0].payload is None + + def test_payload_is_deep_copied(self): + """register_api_call deep-copies payload to prevent mutation.""" + r = Results() + payload = {"nested": {"key": "original"}} + _register_task(r, payload=payload) + # Mutate original + payload["nested"]["key"] = "mutated" + # Registered copy should be unaffected + assert r._tasks[0].payload["nested"]["key"] == "original" + + def test_defaults_when_not_set(self): + """When new fields are not explicitly set, defaults are used.""" + r = Results() + r.action = "test_action" + r.state = "merged" + r.check_mode = False + r.operation_type = OperationType.QUERY + r.response_current = {"RETURN_CODE": 200, "MESSAGE": "OK"} + r.result_current = {"success": True} + r.diff_current = {} + r.register_api_call() + + task = r._tasks[0] + assert task.path == "" + assert task.verb == "GET" + assert task.payload is None + assert task.verbosity_level == 3 + + +# ============================================================================= +# Test: aggregate properties (path, verb, payload, verbosity_level) +# ============================================================================= + + +class TestAggregateProperties: + """Tests for the aggregate list properties.""" + + def test_aggregate_properties(self): + """Aggregate properties return lists across all registered tasks.""" + r = Results() + _register_task(r, path="/api/v1/a", verb=HttpVerbEnum.GET, payload=None, verbosity_level=1) + _register_task(r, path="/api/v1/b", verb=HttpVerbEnum.POST, payload={"x": 1}, verbosity_level=5) + + assert r.path == 
["/api/v1/a", "/api/v1/b"] + assert r.verb == ["GET", "POST"] + assert r.payload == [None, {"x": 1}] + assert r.verbosity_level == [1, 5] + + def test_empty_when_no_tasks(self): + """Aggregate properties return empty lists when no tasks registered.""" + r = Results() + assert r.path == [] + assert r.verb == [] + assert r.payload == [] + assert r.verbosity_level == [] + + +# ============================================================================= +# Test: build_final_result includes new fields +# ============================================================================= + + +class TestBuildFinalResultNewFields: + """Tests that build_final_result() includes the new fields.""" + + def test_final_result_includes_new_fields(self): + """build_final_result populates path, verb, payload, verbosity_level.""" + r = Results() + _register_task(r, path="/api/v1/fabrics", verb=HttpVerbEnum.DELETE, payload={"name": "F1"}, verbosity_level=2) + _register_task(r, path="/api/v1/switches", verb=HttpVerbEnum.GET, payload=None, verbosity_level=4) + + r.build_final_result() + result = r.final_result + + assert result["path"] == ["/api/v1/fabrics", "/api/v1/switches"] + assert result["verb"] == ["DELETE", "GET"] + assert result["payload"] == [{"name": "F1"}, None] + assert result["verbosity_level"] == [2, 4] + + def test_final_result_empty_tasks(self): + """build_final_result with no tasks produces empty lists for new fields.""" + r = Results() + r.build_final_result() + result = r.final_result + + assert result["path"] == [] + assert result["verb"] == [] + assert result["payload"] == [] + assert result["verbosity_level"] == [] diff --git a/tests/unit/module_utils/test_sender_nd.py b/tests/unit/module_utils/test_sender_nd.py new file mode 100644 index 00000000..5edd102f --- /dev/null +++ b/tests/unit/module_utils/test_sender_nd.py @@ -0,0 +1,906 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for sender_nd.py + +Tests the Sender class for sending REST requests via the Ansible HttpApi plugin. +""" + +# pylint: disable=unused-import +# pylint: disable=redefined-outer-name +# pylint: disable=protected-access +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=invalid-name +# pylint: disable=line-too-long +# pylint: disable=too-many-lines + +from __future__ import absolute_import, annotations, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +from unittest.mock import MagicMock, patch + +import pytest +from ansible.module_utils.connection import ConnectionError as AnsibleConnectionError +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd import Sender +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import does_not_raise + +# ============================================================================= +# Test: Sender initialization +# ============================================================================= + + +def test_sender_nd_00010(): + """ + # Summary + + Verify Sender initialization with default values. + + ## Test + + - Instance can be created with no arguments + - All attributes default to None + + ## Classes and Methods + + - Sender.__init__() + """ + with does_not_raise(): + instance = Sender() + assert instance._ansible_module is None + assert instance._connection is None + assert instance._path is None + assert instance._payload is None + assert instance._response is None + assert instance._verb is None + + +def test_sender_nd_00020(): + """ + # Summary + + Verify Sender initialization with all parameters. 
+ + ## Test + + - Instance can be created with all optional constructor arguments + + ## Classes and Methods + + - Sender.__init__() + """ + mock_module = MagicMock() + with does_not_raise(): + instance = Sender( + ansible_module=mock_module, + verb=HttpVerbEnum.GET, + path="/api/v1/test", + payload={"key": "value"}, + ) + assert instance._ansible_module is mock_module + assert instance._path == "/api/v1/test" + assert instance._payload == {"key": "value"} + assert instance._verb == HttpVerbEnum.GET + + +# ============================================================================= +# Test: Sender.ansible_module property +# ============================================================================= + + +def test_sender_nd_00100(): + """ + # Summary + + Verify ansible_module getter raises ValueError when not set. + + ## Test + + - Accessing ansible_module before setting raises ValueError + + ## Classes and Methods + + - Sender.ansible_module (getter) + """ + instance = Sender() + match = r"Sender\.ansible_module:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.ansible_module + + +def test_sender_nd_00110(): + """ + # Summary + + Verify ansible_module setter/getter. + + ## Test + + - ansible_module can be set and retrieved + + ## Classes and Methods + + - Sender.ansible_module (setter/getter) + """ + instance = Sender() + mock_module = MagicMock() + with does_not_raise(): + instance.ansible_module = mock_module + result = instance.ansible_module + assert result is mock_module + + +# ============================================================================= +# Test: Sender.path property +# ============================================================================= + + +def test_sender_nd_00200(): + """ + # Summary + + Verify path getter raises ValueError when not set. 
+ + ## Test + + - Accessing path before setting raises ValueError + + ## Classes and Methods + + - Sender.path (getter) + """ + instance = Sender() + match = r"Sender\.path:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.path + + +def test_sender_nd_00210(): + """ + # Summary + + Verify path setter/getter. + + ## Test + + - path can be set and retrieved + + ## Classes and Methods + + - Sender.path (setter/getter) + """ + instance = Sender() + with does_not_raise(): + instance.path = "/api/v1/test/endpoint" + result = instance.path + assert result == "/api/v1/test/endpoint" + + +# ============================================================================= +# Test: Sender.verb property +# ============================================================================= + + +def test_sender_nd_00300(): + """ + # Summary + + Verify verb getter raises ValueError when not set. + + ## Test + + - Accessing verb before setting raises ValueError + + ## Classes and Methods + + - Sender.verb (getter) + """ + instance = Sender() + match = r"Sender\.verb:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.verb + + +def test_sender_nd_00310(): + """ + # Summary + + Verify verb setter/getter with valid HttpVerbEnum. + + ## Test + + - verb can be set and retrieved with all HttpVerbEnum values + + ## Classes and Methods + + - Sender.verb (setter/getter) + """ + instance = Sender() + for verb in (HttpVerbEnum.GET, HttpVerbEnum.POST, HttpVerbEnum.PUT, HttpVerbEnum.DELETE): + with does_not_raise(): + instance.verb = verb + result = instance.verb + assert result == verb + + +def test_sender_nd_00320(): + """ + # Summary + + Verify verb setter raises TypeError for invalid value. 
+ + ## Test + + - Setting verb to a value not in HttpVerbEnum.values() raises TypeError + + ## Classes and Methods + + - Sender.verb (setter) + """ + instance = Sender() + match = r"Sender\.verb:.*must be one of" + with pytest.raises(TypeError, match=match): + instance.verb = "INVALID" # type: ignore[assignment] + + +# ============================================================================= +# Test: Sender.payload property +# ============================================================================= + + +def test_sender_nd_00400(): + """ + # Summary + + Verify payload defaults to None. + + ## Test + + - payload is None by default + + ## Classes and Methods + + - Sender.payload (getter) + """ + instance = Sender() + with does_not_raise(): + result = instance.payload + assert result is None + + +def test_sender_nd_00410(): + """ + # Summary + + Verify payload setter/getter with valid dict. + + ## Test + + - payload can be set and retrieved + + ## Classes and Methods + + - Sender.payload (setter/getter) + """ + instance = Sender() + with does_not_raise(): + instance.payload = {"name": "test", "config": {"key": "value"}} + result = instance.payload + assert result == {"name": "test", "config": {"key": "value"}} + + +def test_sender_nd_00420(): + """ + # Summary + + Verify payload setter raises TypeError for non-dict. + + ## Test + + - Setting payload to a non-dict raises TypeError + + ## Classes and Methods + + - Sender.payload (setter) + """ + instance = Sender() + match = r"Sender\.payload:.*must be a dict" + with pytest.raises(TypeError, match=match): + instance.payload = "not a dict" # type: ignore[assignment] + + +def test_sender_nd_00430(): + """ + # Summary + + Verify payload setter raises TypeError for list. 
+ + ## Test + + - Setting payload to a list raises TypeError + + ## Classes and Methods + + - Sender.payload (setter) + """ + instance = Sender() + match = r"Sender\.payload:.*must be a dict" + with pytest.raises(TypeError, match=match): + instance.payload = [1, 2, 3] # type: ignore[assignment] + + +# ============================================================================= +# Test: Sender.response property +# ============================================================================= + + +def test_sender_nd_00500(): + """ + # Summary + + Verify response getter raises ValueError when not set. + + ## Test + + - Accessing response before commit raises ValueError + + ## Classes and Methods + + - Sender.response (getter) + """ + instance = Sender() + match = r"Sender\.response:.*must be set before accessing" + with pytest.raises(ValueError, match=match): + result = instance.response + + +def test_sender_nd_00510(): + """ + # Summary + + Verify response getter returns deepcopy. + + ## Test + + - response getter returns a deepcopy of the internal response + + ## Classes and Methods + + - Sender.response (getter) + """ + instance = Sender() + instance._response = {"RETURN_CODE": 200, "MESSAGE": "OK", "DATA": {"key": "value"}} + result = instance.response + # Modify the copy + result["MODIFIED"] = True + # Verify original is unchanged + assert "MODIFIED" not in instance._response + + +def test_sender_nd_00520(): + """ + # Summary + + Verify response setter raises TypeError for non-dict. + + ## Test + + - Setting response to a non-dict raises TypeError + + ## Classes and Methods + + - Sender.response (setter) + """ + instance = Sender() + match = r"Sender\.response:.*must be a dict" + with pytest.raises(TypeError, match=match): + instance.response = "not a dict" # type: ignore[assignment] + + +def test_sender_nd_00530(): + """ + # Summary + + Verify response setter accepts valid dict. 
+ + ## Test + + - response can be set with a valid dict + + ## Classes and Methods + + - Sender.response (setter/getter) + """ + instance = Sender() + response = {"RETURN_CODE": 200, "MESSAGE": "OK"} + with does_not_raise(): + instance.response = response + result = instance.response + assert result["RETURN_CODE"] == 200 + assert result["MESSAGE"] == "OK" + + +# ============================================================================= +# Test: Sender._normalize_response() +# ============================================================================= + + +def test_sender_nd_00600(): + """ + # Summary + + Verify _normalize_response with normal JSON response. + + ## Test + + - Response with valid DATA passes through unchanged + + ## Classes and Methods + + - Sender._normalize_response() + """ + instance = Sender() + response = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": {"status": "success"}, + } + result = instance._normalize_response(response) + assert result["DATA"] == {"status": "success"} + assert result["MESSAGE"] == "OK" + + +def test_sender_nd_00610(): + """ + # Summary + + Verify _normalize_response when DATA is None and raw is present. + + ## Test + + - When DATA is None and raw is present, DATA is populated with raw_response + - MESSAGE is set to indicate JSON parsing failure + + ## Classes and Methods + + - Sender._normalize_response() + """ + instance = Sender() + response = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": None, + "raw": "Not JSON", + } + result = instance._normalize_response(response) + assert result["DATA"] == {"raw_response": "Not JSON"} + assert result["MESSAGE"] == "Response could not be parsed as JSON" + + +def test_sender_nd_00620(): + """ + # Summary + + Verify _normalize_response when DATA is None, raw is present, + and MESSAGE is None. 
+ + ## Test + + - When MESSAGE is None, it is set to indicate JSON parsing failure + + ## Classes and Methods + + - Sender._normalize_response() + """ + instance = Sender() + response = { + "RETURN_CODE": 200, + "MESSAGE": None, + "DATA": None, + "raw": "raw content", + } + result = instance._normalize_response(response) + assert result["DATA"] == {"raw_response": "raw content"} + assert result["MESSAGE"] == "Response could not be parsed as JSON" + + +def test_sender_nd_00630(): + """ + # Summary + + Verify _normalize_response when DATA is None and raw is also None. + + ## Test + + - When both DATA and raw are None, response is not modified + + ## Classes and Methods + + - Sender._normalize_response() + """ + instance = Sender() + response = { + "RETURN_CODE": 500, + "MESSAGE": "Internal Server Error", + "DATA": None, + } + result = instance._normalize_response(response) + assert result["DATA"] is None + assert result["MESSAGE"] == "Internal Server Error" + + +def test_sender_nd_00640(): + """ + # Summary + + Verify _normalize_response preserves non-OK MESSAGE when raw is present. + + ## Test + + - When DATA is None and raw is present, MESSAGE is only overwritten + if it was "OK" or None + + ## Classes and Methods + + - Sender._normalize_response() + """ + instance = Sender() + response = { + "RETURN_CODE": 500, + "MESSAGE": "Internal Server Error", + "DATA": None, + "raw": "raw error content", + } + result = instance._normalize_response(response) + assert result["DATA"] == {"raw_response": "raw error content"} + # MESSAGE is NOT overwritten because it's not "OK" or None + assert result["MESSAGE"] == "Internal Server Error" + + +# ============================================================================= +# Test: Sender.commit() with mocked Connection +# ============================================================================= + + +def test_sender_nd_00700(): + """ + # Summary + + Verify commit() with successful GET request (no payload). 
+
+    ## Test
+
+    - commit() calls Connection.send_request with verb and path
+    - response is populated from the Connection response
+
+    ## Classes and Methods
+
+    - Sender.commit()
+    """
+    mock_module = MagicMock()
+    mock_module._socket_path = "/tmp/test_socket"
+    mock_module.params = {"config": {}}
+
+    mock_connection = MagicMock()
+    mock_connection.send_request.return_value = {
+        "RETURN_CODE": 200,
+        "MESSAGE": "OK",
+        "DATA": {"status": "success"},
+    }
+
+    instance = Sender()
+    instance.ansible_module = mock_module
+    instance.path = "/api/v1/test"
+    instance.verb = HttpVerbEnum.GET
+
+    with patch(
+        "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection",
+        return_value=mock_connection,
+    ):
+        with does_not_raise():
+            instance.commit()
+
+    assert instance.response["RETURN_CODE"] == 200
+    assert instance.response["DATA"]["status"] == "success"
+    mock_connection.send_request.assert_called_once_with("GET", "/api/v1/test")
+
+
+def test_sender_nd_00710():
+    """
+    # Summary
+
+    Verify commit() with POST request including payload.
+
+    ## Test
+
+    - commit() calls Connection.send_request with verb, path, and JSON payload
+
+    ## Classes and Methods
+
+    - Sender.commit()
+    """
+    mock_module = MagicMock()
+    mock_module._socket_path = "/tmp/test_socket"
+    mock_module.params = {"config": {}}
+
+    mock_connection = MagicMock()
+    mock_connection.send_request.return_value = {
+        "RETURN_CODE": 200,
+        "MESSAGE": "OK",
+        "DATA": {"status": "created"},
+    }
+
+    instance = Sender()
+    instance.ansible_module = mock_module
+    instance.path = "/api/v1/test/create"
+    instance.verb = HttpVerbEnum.POST
+    instance.payload = {"name": "test"}
+
+    with patch(
+        "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection",
+        return_value=mock_connection,
+    ):
+        with does_not_raise():
+            instance.commit()
+
+    assert instance.response["RETURN_CODE"] == 200
+    assert instance.response["DATA"]["status"] == "created"
+    mock_connection.send_request.assert_called_once_with(
+        "POST",
+        "/api/v1/test/create",
+        '{"name": "test"}',
+    )
+
+
+def test_sender_nd_00720():
+    """
+    # Summary
+
+    Verify commit() raises ValueError on connection failure.
+
+    ## Test
+
+    - When Connection.send_request raises AnsibleConnectionError,
+      commit() re-raises as ValueError
+
+    ## Classes and Methods
+
+    - Sender.commit()
+    """
+    mock_module = MagicMock()
+    mock_module._socket_path = "/tmp/test_socket"
+    mock_module.params = {"config": {}}
+
+    mock_connection = MagicMock()
+    mock_connection.send_request.side_effect = AnsibleConnectionError("Connection refused")
+
+    instance = Sender()
+    instance.ansible_module = mock_module
+    instance.path = "/api/v1/test"
+    instance.verb = HttpVerbEnum.GET
+
+    with patch(
+        "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection",
+        return_value=mock_connection,
+    ):
+        match = r"Sender\.commit:.*ConnectionError occurred"
+        with pytest.raises(ValueError, match=match):
+            instance.commit()
+
+
+def test_sender_nd_00730():
+    """
+    # Summary
+
+    Verify commit() raises ValueError on unexpected exception.
+
+    ## Test
+
+    - When Connection.send_request raises an unexpected Exception,
+      commit() wraps it in ValueError
+
+    ## Classes and Methods
+
+    - Sender.commit()
+    """
+    mock_module = MagicMock()
+    mock_module._socket_path = "/tmp/test_socket"
+    mock_module.params = {"config": {}}
+
+    mock_connection = MagicMock()
+    mock_connection.send_request.side_effect = RuntimeError("Unexpected error")
+
+    instance = Sender()
+    instance.ansible_module = mock_module
+    instance.path = "/api/v1/test"
+    instance.verb = HttpVerbEnum.GET
+
+    with patch(
+        "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection",
+        return_value=mock_connection,
+    ):
+        match = r"Sender\.commit:.*Unexpected error occurred"
+        with pytest.raises(ValueError, match=match):
+            instance.commit()
+
+
+def test_sender_nd_00740():
+    """
+    # Summary
+
+    Verify commit() reuses existing connection on second call.
+
+    ## Test
+
+    - First commit creates a new Connection
+    - Second commit reuses the existing connection
+    - Connection constructor is called only once
+
+    ## Classes and Methods
+
+    - Sender.commit()
+    """
+    mock_module = MagicMock()
+    mock_module._socket_path = "/tmp/test_socket"
+    mock_module.params = {"config": {}}
+
+    mock_connection = MagicMock()
+    mock_connection.send_request.return_value = {
+        "RETURN_CODE": 200,
+        "MESSAGE": "OK",
+        "DATA": {},
+    }
+
+    with patch(
+        "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection",
+        return_value=mock_connection,
+    ) as mock_conn_class:
+        instance = Sender()
+        instance.ansible_module = mock_module
+        instance.path = "/api/v1/test"
+        instance.verb = HttpVerbEnum.GET
+
+        instance.commit()
+        instance.commit()
+
+        # Connection constructor should only be called once
+        mock_conn_class.assert_called_once()
+        # send_request should be called twice
+        assert mock_connection.send_request.call_count == 2
+
+
+def test_sender_nd_00750():
+    """
+    # Summary
+
+    Verify commit() normalizes non-JSON responses.
+ + ## Test + + - When Connection returns DATA=None with raw content, + commit() normalizes the response + + ## Classes and Methods + + - Sender.commit() + - Sender._normalize_response() + """ + mock_module = MagicMock() + mock_module._socket_path = "/tmp/test_socket" + mock_module.params = {"config": {}} + + mock_connection = MagicMock() + mock_connection.send_request.return_value = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": None, + "raw": "Non-JSON response", + } + + instance = Sender() + instance.ansible_module = mock_module + instance.path = "/api/v1/test" + instance.verb = HttpVerbEnum.GET + + with patch( + "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + return_value=mock_connection, + ): + with does_not_raise(): + instance.commit() + + assert instance.response["DATA"] == {"raw_response": "Non-JSON response"} + assert instance.response["MESSAGE"] == "Response could not be parsed as JSON" + + +def test_sender_nd_00760(): + """ + # Summary + + Verify commit() with PUT request including payload. 
+ + ## Test + + - commit() calls Connection.send_request with PUT verb, path, and JSON payload + + ## Classes and Methods + + - Sender.commit() + """ + mock_module = MagicMock() + mock_module._socket_path = "/tmp/test_socket" + mock_module.params = {"config": {}} + + mock_connection = MagicMock() + mock_connection.send_request.return_value = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": {"status": "updated"}, + } + + instance = Sender() + instance.ansible_module = mock_module + instance.path = "/api/v1/test/update/12345" + instance.verb = HttpVerbEnum.PUT + instance.payload = {"status": "active"} + + with patch( + "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + return_value=mock_connection, + ): + with does_not_raise(): + instance.commit() + + assert instance.response["RETURN_CODE"] == 200 + mock_connection.send_request.assert_called_once_with( + "PUT", + "/api/v1/test/update/12345", + '{"status": "active"}', + ) + + +def test_sender_nd_00770(): + """ + # Summary + + Verify commit() with DELETE request (no payload). 
+ + ## Test + + - commit() calls Connection.send_request with DELETE verb and path + + ## Classes and Methods + + - Sender.commit() + """ + mock_module = MagicMock() + mock_module._socket_path = "/tmp/test_socket" + mock_module.params = {"config": {}} + + mock_connection = MagicMock() + mock_connection.send_request.return_value = { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "DATA": {"status": "deleted"}, + } + + instance = Sender() + instance.ansible_module = mock_module + instance.path = "/api/v1/test/delete/12345" + instance.verb = HttpVerbEnum.DELETE + + with patch( + "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + return_value=mock_connection, + ): + with does_not_raise(): + instance.commit() + + assert instance.response["RETURN_CODE"] == 200 + mock_connection.send_request.assert_called_once_with("DELETE", "/api/v1/test/delete/12345") From 06633f6f169036e76f5e5069844bec0d39220193 Mon Sep 17 00:00:00 2001 From: Allen Robel Date: Thu, 12 Mar 2026 16:29:48 -1000 Subject: [PATCH 008/109] [ignore] Fix broken imports after nd42_rest_send restructuring --- plugins/module_utils/endpoints/base.py | 5 +---- .../endpoints/v1/infra/clusterhealth_config.py | 5 +---- .../endpoints/v1/infra/clusterhealth_status.py | 5 +---- plugins/module_utils/endpoints/v1/infra/login.py | 5 +---- tests/unit/module_utils/common_utils.py | 2 +- .../module_utils/endpoints/test_base_model.py | 5 +---- tests/unit/module_utils/test_sender_nd.py | 16 ++++++++-------- 7 files changed, 14 insertions(+), 29 deletions(-) diff --git a/plugins/module_utils/endpoints/base.py b/plugins/module_utils/endpoints/base.py index 9da9620e..3ccdff1c 100644 --- a/plugins/module_utils/endpoints/base.py +++ b/plugins/module_utils/endpoints/base.py @@ -14,10 +14,7 @@ from abc import ABC, abstractmethod -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from typing import Literal +from typing import Literal from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat 
import ( BaseModel, diff --git a/plugins/module_utils/endpoints/v1/infra/clusterhealth_config.py b/plugins/module_utils/endpoints/v1/infra/clusterhealth_config.py index 607cea39..c0a7b6ca 100644 --- a/plugins/module_utils/endpoints/v1/infra/clusterhealth_config.py +++ b/plugins/module_utils/endpoints/v1/infra/clusterhealth_config.py @@ -11,10 +11,7 @@ from __future__ import absolute_import, annotations, division, print_function -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from typing import Literal +from typing import Literal from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, diff --git a/plugins/module_utils/endpoints/v1/infra/clusterhealth_status.py b/plugins/module_utils/endpoints/v1/infra/clusterhealth_status.py index 52e6cc14..ef5afd6c 100644 --- a/plugins/module_utils/endpoints/v1/infra/clusterhealth_status.py +++ b/plugins/module_utils/endpoints/v1/infra/clusterhealth_status.py @@ -11,10 +11,7 @@ from __future__ import absolute_import, annotations, division, print_function -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from typing import Literal +from typing import Literal from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, diff --git a/plugins/module_utils/endpoints/v1/infra/login.py b/plugins/module_utils/endpoints/v1/infra/login.py index 70d894d4..70968615 100644 --- a/plugins/module_utils/endpoints/v1/infra/login.py +++ b/plugins/module_utils/endpoints/v1/infra/login.py @@ -12,10 +12,7 @@ from __future__ import absolute_import, annotations, division, print_function -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from typing import Literal +from typing import Literal from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, diff --git a/tests/unit/module_utils/common_utils.py b/tests/unit/module_utils/common_utils.py index bc64b0d6..f25c31eb 100644 --- a/tests/unit/module_utils/common_utils.py +++ 
b/tests/unit/module_utils/common_utils.py @@ -15,7 +15,7 @@ from contextlib import contextmanager import pytest -from ansible_collections.cisco.nd.plugins.module_utils.log import Log +from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log from ansible_collections.cisco.nd.tests.unit.module_utils.fixtures.load_fixture import load_fixture from ansible_collections.cisco.nd.tests.unit.module_utils.response_generator import ResponseGenerator from ansible_collections.cisco.nd.tests.unit.module_utils.sender_file import Sender as SenderFile diff --git a/tests/unit/module_utils/endpoints/test_base_model.py b/tests/unit/module_utils/endpoints/test_base_model.py index e2db13be..a14da9d8 100644 --- a/tests/unit/module_utils/endpoints/test_base_model.py +++ b/tests/unit/module_utils/endpoints/test_base_model.py @@ -22,13 +22,10 @@ # pylint: disable=too-few-public-methods from abc import ABC, abstractmethod -from typing import TYPE_CHECKING +from typing import Literal import pytest -if TYPE_CHECKING: - from typing import Literal - from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, ) diff --git a/tests/unit/module_utils/test_sender_nd.py b/tests/unit/module_utils/test_sender_nd.py index 5edd102f..4b8d7f47 100644 --- a/tests/unit/module_utils/test_sender_nd.py +++ b/tests/unit/module_utils/test_sender_nd.py @@ -600,7 +600,7 @@ def test_sender_nd_00700(): instance.verb = HttpVerbEnum.GET with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ): with does_not_raise(): @@ -643,7 +643,7 @@ def test_sender_nd_00710(): instance.payload = {"name": "test"} with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ): with does_not_raise(): @@ 
-686,7 +686,7 @@ def test_sender_nd_00720(): instance.verb = HttpVerbEnum.GET with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ): match = r"Sender\.commit:.*ConnectionError occurred" @@ -722,7 +722,7 @@ def test_sender_nd_00730(): instance.verb = HttpVerbEnum.GET with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ): match = r"Sender\.commit:.*Unexpected error occurred" @@ -758,7 +758,7 @@ def test_sender_nd_00740(): } with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ) as mock_conn_class: instance = Sender() @@ -809,7 +809,7 @@ def test_sender_nd_00750(): instance.verb = HttpVerbEnum.GET with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ): with does_not_raise(): @@ -851,7 +851,7 @@ def test_sender_nd_00760(): instance.payload = {"status": "active"} with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ): with does_not_raise(): @@ -896,7 +896,7 @@ def test_sender_nd_00770(): instance.verb = HttpVerbEnum.DELETE with patch( - "ansible_collections.cisco.nd.plugins.module_utils.sender_nd.Connection", + "ansible_collections.cisco.nd.plugins.module_utils.rest.sender_nd.Connection", return_value=mock_connection, ): with does_not_raise(): From 888ee57a245f49d52e50777437922e49c4104066 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 13 Mar 2026 
13:51:31 +0530 Subject: [PATCH 009/109] Add constants, and fix comparison of constants in RMA. --- plugins/module_utils/nd_switch_resources.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 3894de7e..b84b143d 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -78,9 +78,13 @@ # ========================================================================= -# Shared Dependency Container +# Constants & Globals # ========================================================================= +# Max hops is not supported by the module. +_DISCOVERY_MAX_HOPS: int = 0 + + @dataclass class SwitchServiceContext: """Store shared dependencies used by service classes. @@ -431,7 +435,7 @@ def bulk_discover( seed_ips = [switch.seed_ip for switch in switches] log.debug(f"Seed IPs: {seed_ips}") - max_hops = switches[0].max_hops if hasattr(switches[0], 'max_hops') else 0 + max_hops = _DISCOVERY_MAX_HOPS discovery_request = ShallowDiscoveryRequestModel( seedIpCollection=seed_ips, @@ -558,7 +562,7 @@ def build_proposed( if discovered: if cfg.role is not None: - discovered["role"] = cfg.role + discovered = {**discovered, "role": cfg.role} proposed.append( SwitchDataModel.from_response(discovered) ) @@ -1854,21 +1858,21 @@ def _validate_prerequisites( ) ) - if ad.discovery_status != DiscoveryStatus.UNREACHABLE.value: + if ad.discovery_status != DiscoveryStatus.UNREACHABLE: nd.module.fail_json( msg=( f"RMA: Switch '{old_serial}' has discovery status " - f"'{ad.discovery_status or 'unknown'}', " + f"'{ad.discovery_status.value if ad.discovery_status else 'unknown'}', " f"expected 'unreachable'. The old switch must be " f"unreachable before RMA can proceed." 
) ) - if ad.system_mode != SystemMode.MAINTENANCE.value: + if ad.system_mode != SystemMode.MAINTENANCE: nd.module.fail_json( msg=( f"RMA: Switch '{old_serial}' is in " - f"'{ad.system_mode or 'unknown'}' " + f"'{ad.system_mode.value if ad.system_mode else 'unknown'}' " f"mode, expected 'maintenance'. Put the switch in " f"maintenance mode before initiating RMA." ) From 267846a2ce9f0f03f5c2996205824991f3569374 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 13 Mar 2026 16:06:36 +0530 Subject: [PATCH 010/109] Add further POAP Bootstrap Validation and Fixes --- .../nd_manage_switches/config_models.py | 48 +++++-- plugins/module_utils/nd_switch_resources.py | 123 ++++++++++++++++-- 2 files changed, 151 insertions(+), 20 deletions(-) diff --git a/plugins/module_utils/models/nd_manage_switches/config_models.py b/plugins/module_utils/models/nd_manage_switches/config_models.py index 4dca8c6b..eb912e7e 100644 --- a/plugins/module_utils/models/nd_manage_switches/config_models.py +++ b/plugins/module_utils/models/nd_manage_switches/config_models.py @@ -52,6 +52,29 @@ class ConfigDataModel(NDNestedModel): description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)" ) + @field_validator('models', mode='before') + @classmethod + def validate_models_list(cls, v: Any) -> List[str]: + """Validate models is a non-empty list of strings.""" + if v is None: + raise ValueError( + "'models' is required in config_data. " + "Provide a list of module model strings, " + "e.g. models: [N9K-X9364v, N9K-vSUP]" + ) + if not isinstance(v, list): + raise ValueError( + f"'models' must be a list of module model strings, got: {type(v).__name__}. " + f"e.g. models: [N9K-X9364v, N9K-vSUP]" + ) + if len(v) == 0: + raise ValueError( + "'models' list cannot be empty. " + "Provide at least one module model string, " + "e.g. 
models: [N9K-X9364v, N9K-vSUP]" + ) + return v + @field_validator('gateway', mode='before') @classmethod def validate_gateway(cls, v: str) -> str: @@ -149,21 +172,25 @@ def validate_operation_type(self) -> Self: @model_validator(mode='after') def validate_required_fields_for_non_swap(self) -> Self: - """Validate model/version/hostname/config_data are all provided for non-swap POAP. + """Validate model/version/hostname/config_data for pre-provision operations. + + Pre-provision (preprovision_serial only): + model, version, hostname, config_data are all mandatory because the + controller has no physical switch to pull these values from. - For Bootstrap (serial_number only) or Pre-provision (preprovision_serial only) - all four descriptor fields are mandatory. This mirrors the - dcnm_inventory.py check: - if only one serial provided → model, version, hostname, config_data required. + Bootstrap (serial_number only): + These fields are optional — they can be omitted and the module will + pull them from the bootstrap GET API response at runtime. If + provided, they are validated against the bootstrap data before import. - When both serials are present (swap mode), these fields are not - required because the swap API only needs the new serial number. + Swap (both serials present): + No check needed — the swap API only requires the new serial number. """ has_serial = bool(self.serial_number) has_preprov = bool(self.preprovision_serial) - # XOR: exactly one serial → non-swap case - if has_serial != has_preprov: + # Pre-provision only: all four descriptor fields are mandatory + if has_preprov and not has_serial: missing = [] if not self.model: missing.append("model") @@ -174,10 +201,9 @@ def validate_required_fields_for_non_swap(self) -> Self: if not self.config_data: missing.append("config_data") if missing: - op = "Bootstrap" if has_serial else "Pre-provisioning" raise ValueError( f"model, version, hostname and config_data are required for " - f"{op} a switch. 
Missing: {', '.join(missing)}" + f"Pre-provisioning a switch. Missing: {', '.join(missing)}" ) return self diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index b84b143d..1b67e18a 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -1185,6 +1185,10 @@ def _handle_poap_bootstrap( log.error(msg) nd.module.fail_json(msg=msg) + # Validate user-supplied fields against bootstrap data (if provided) + # and warn about any fields that will be pulled from the API. + self._validate_bootstrap_fields(poap_cfg, bootstrap_data, log) + model = self._build_bootstrap_import_model( switch_cfg, poap_cfg, bootstrap_data ) @@ -1215,6 +1219,90 @@ def _handle_poap_bootstrap( log.debug("EXIT: _handle_poap_bootstrap()") + def _validate_bootstrap_fields( + self, + poap_cfg: POAPConfigModel, + bootstrap_data: Dict[str, Any], + log: logging.Logger, + ) -> None: + """Validate user-supplied bootstrap fields against the bootstrap API response. + + If a field is provided in the playbook config, it must match what the + bootstrap API reports. Fields that are omitted are silently filled in + from the API at import time — no error is raised for those. + + Args: + poap_cfg: POAP config entry from the playbook. + bootstrap_data: Matching entry from the bootstrap GET API. + log: Logger instance. + + Returns: + None. 
+ """ + serial = poap_cfg.serial_number + bs_data = bootstrap_data.get("data") or {} + mismatches: List[str] = [] + + if poap_cfg.model and poap_cfg.model != bootstrap_data.get("model"): + mismatches.append( + f"model: provided '{poap_cfg.model}', " + f"bootstrap reports '{bootstrap_data.get('model')}'" + ) + + if poap_cfg.version and poap_cfg.version != bootstrap_data.get("softwareVersion"): + mismatches.append( + f"version: provided '{poap_cfg.version}', " + f"bootstrap reports '{bootstrap_data.get('softwareVersion')}'" + ) + + if poap_cfg.config_data: + bs_gateway = ( + bootstrap_data.get("gatewayIpMask") + or bs_data.get("gatewayIpMask") + ) + if poap_cfg.config_data.gateway and poap_cfg.config_data.gateway != bs_gateway: + mismatches.append( + f"config_data.gateway: provided '{poap_cfg.config_data.gateway}', " + f"bootstrap reports '{bs_gateway}'" + ) + + bs_models = bs_data.get("models", []) + if ( + poap_cfg.config_data.models + and sorted(poap_cfg.config_data.models) != sorted(bs_models) + ): + mismatches.append( + f"config_data.models: provided {poap_cfg.config_data.models}, " + f"bootstrap reports {bs_models}" + ) + + if mismatches: + self.ctx.nd.module.fail_json( + msg=( + f"Bootstrap field mismatch for serial '{serial}'. 
" + f"The following provided values do not match the " + f"bootstrap API data:\n" + + "\n".join(f" - {m}" for m in mismatches) + ) + ) + + # Log which fields will be sourced from the bootstrap API + pulled: List[str] = [] + if not poap_cfg.model: + pulled.append("model") + if not poap_cfg.version: + pulled.append("version") + if not poap_cfg.hostname: + pulled.append("hostname") + if not poap_cfg.config_data: + pulled.append("config_data (gateway + models)") + if pulled: + log.info( + f"Bootstrap serial '{serial}': the following fields were not " + f"provided and will be sourced from the bootstrap API: " + f"{', '.join(pulled)}" + ) + def _build_bootstrap_import_model( self, switch_cfg: SwitchConfigModel, @@ -1237,31 +1325,48 @@ def _build_bootstrap_import_model( ) bs = bootstrap_data or {} + bs_data = bs.get("data") or {} - # User config fields serial_number = poap_cfg.serial_number - hostname = poap_cfg.hostname ip = switch_cfg.seed_ip - model = poap_cfg.model - version = poap_cfg.version - image_policy = poap_cfg.image_policy - gateway_ip_mask = poap_cfg.config_data.gateway if poap_cfg.config_data else None switch_role = switch_cfg.role password = switch_cfg.password auth_proto = SnmpV3AuthProtocol.MD5 # POAP/bootstrap always uses MD5 + image_policy = poap_cfg.image_policy discovery_username = getattr(poap_cfg, "discovery_username", None) discovery_password = getattr(poap_cfg, "discovery_password", None) + # Use user-provided values when available; fall back to bootstrap API data. 
+ model = poap_cfg.model or bs.get("model", "") + version = poap_cfg.version or bs.get("softwareVersion", "") + hostname = poap_cfg.hostname or bs.get("hostname", "") + + gateway_ip_mask = ( + (poap_cfg.config_data.gateway if poap_cfg.config_data else None) + or bs.get("gatewayIpMask") + or bs_data.get("gatewayIpMask") + ) + data_models = ( + (poap_cfg.config_data.models if poap_cfg.config_data else None) + or bs_data.get("models", []) + ) + + # Build the data block from resolved values (replaces build_poap_data_block) + data_block: Optional[Dict[str, Any]] = None + if gateway_ip_mask or data_models: + data_block = {} + if gateway_ip_mask: + data_block["gatewayIpMask"] = gateway_ip_mask + if data_models: + data_block["models"] = data_models + # Bootstrap API response fields fingerprint = bs.get("fingerPrint", bs.get("fingerprint", "")) public_key = bs.get("publicKey", "") re_add = bs.get("reAdd", False) in_inventory = bs.get("inInventory", False) - # Shared data block builder - data_block = build_poap_data_block(poap_cfg) - bootstrap_model = BootstrapImportSwitchModel( serialNumber=serial_number, model=model, From 9bd95af5fa9f1faa7aebdbdc56577003506c9a9b Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 13 Mar 2026 17:16:49 +0530 Subject: [PATCH 011/109] Rebasing Mixins and Endpoints with Latest Endpoint Changes --- plugins/module_utils/endpoints/mixins.py | 24 +++++++ .../manage/nd_manage_switches/credentials.py | 27 +++----- .../nd_manage_switches/fabric_bootstrap.py | 31 +++------ .../nd_manage_switches/fabric_config.py | 28 ++------ .../nd_manage_switches/fabric_discovery.py | 16 ++--- .../fabric_switch_actions.py | 68 ++++--------------- .../nd_manage_switches/fabric_switches.py | 66 +++++------------- 7 files changed, 83 insertions(+), 177 deletions(-) diff --git a/plugins/module_utils/endpoints/mixins.py b/plugins/module_utils/endpoints/mixins.py index 47695611..a6065b8c 100644 --- a/plugins/module_utils/endpoints/mixins.py +++ 
b/plugins/module_utils/endpoints/mixins.py @@ -32,6 +32,12 @@ class FabricNameMixin(BaseModel): fabric_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="Fabric name") +class FilterMixin(BaseModel): + """Mixin for endpoints that require a Lucene filter expression.""" + + filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") + + class ForceShowRunMixin(BaseModel): """Mixin for endpoints that require force_show_run parameter.""" @@ -62,6 +68,12 @@ class LoginIdMixin(BaseModel): login_id: Optional[str] = Field(default=None, min_length=1, description="Login ID") +class MaxMixin(BaseModel): + """Mixin for endpoints that require a max results parameter.""" + + max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") + + class NetworkNameMixin(BaseModel): """Mixin for endpoints that require network_name parameter.""" @@ -74,12 +86,24 @@ class NodeNameMixin(BaseModel): node_name: Optional[str] = Field(default=None, min_length=1, description="Node name") +class OffsetMixin(BaseModel): + """Mixin for endpoints that require a pagination offset parameter.""" + + offset: Optional[int] = Field(default=None, ge=0, description="Pagination offset") + + class SwitchSerialNumberMixin(BaseModel): """Mixin for endpoints that require switch_sn parameter.""" switch_sn: Optional[str] = Field(default=None, min_length=1, description="Switch serial number") +class TicketIdMixin(BaseModel): + """Mixin for endpoints that require ticket_id parameter.""" + + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + + class VrfNameMixin(BaseModel): """Mixin for endpoints that require vrf_name parameter.""" diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py index 242948a7..1576e9b7 100644 --- 
a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py @@ -23,9 +23,12 @@ __author__ = "Akshayanat Chengam Saravanan" # pylint: enable=invalid-name -from typing import Literal, Optional +from typing import Literal from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + TicketIdMixin, +) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, ) @@ -33,16 +36,14 @@ BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - BaseModel, - ConfigDict, Field, ) - -# Common config for basic validation -COMMON_CONFIG = ConfigDict(validate_assignment=True) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) -class CredentialsSwitchesEndpointParams(EndpointQueryParams): +class CredentialsSwitchesEndpointParams(TicketIdMixin, EndpointQueryParams): """ # Summary @@ -50,7 +51,7 @@ class CredentialsSwitchesEndpointParams(EndpointQueryParams): ## Parameters - - ticket_id: Change control ticket ID (optional) + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) ## Usage @@ -61,10 +62,8 @@ class CredentialsSwitchesEndpointParams(EndpointQueryParams): ``` """ - ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") - -class _V1ManageCredentialsSwitchesBase(BaseModel): +class _V1ManageCredentialsSwitchesBase(NDEndpointBaseModel): """ Base class for Credentials Switches endpoints. @@ -72,8 +71,6 @@ class _V1ManageCredentialsSwitchesBase(BaseModel): /api/v1/manage/credentials/switches endpoint. 
""" - model_config = COMMON_CONFIG - @property def _base_path(self) -> str: """Build the base endpoint path.""" @@ -120,10 +117,6 @@ class V1ManageCredentialsSwitchesPost(_V1ManageCredentialsSwitchesBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageCredentialsSwitchesPost"] = Field( default="V1ManageCredentialsSwitchesPost", description="Class name for backward compatibility" ) diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py index d2e07828..cced1ce5 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py @@ -25,6 +25,9 @@ from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( FabricNameMixin, + FilterMixin, + MaxMixin, + OffsetMixin, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, @@ -33,16 +36,14 @@ BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - BaseModel, - ConfigDict, Field, ) - -# Common config for basic validation -COMMON_CONFIG = ConfigDict(validate_assignment=True) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) -class FabricBootstrapEndpointParams(EndpointQueryParams): +class FabricBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): """ # Summary @@ -50,9 +51,9 @@ class FabricBootstrapEndpointParams(EndpointQueryParams): ## Parameters - - max: Maximum number of results to return 
(optional) - - offset: Pagination offset (optional) - - filter: Lucene filter expression (optional) + - max: Maximum number of results to return (optional, from `MaxMixin`) + - offset: Pagination offset (optional, from `OffsetMixin`) + - filter: Lucene filter expression (optional, from `FilterMixin`) ## Usage @@ -63,12 +64,8 @@ class FabricBootstrapEndpointParams(EndpointQueryParams): ``` """ - max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") - offset: Optional[int] = Field(default=None, ge=0, description="Pagination offset") - filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") - -class _V1ManageFabricBootstrapBase(FabricNameMixin, BaseModel): +class _V1ManageFabricBootstrapBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Bootstrap endpoints. @@ -76,8 +73,6 @@ class _V1ManageFabricBootstrapBase(FabricNameMixin, BaseModel): /api/v1/manage/fabrics/{fabricName}/bootstrap endpoint. """ - model_config = COMMON_CONFIG - @property def _base_path(self) -> str: """Build the base endpoint path.""" @@ -131,10 +126,6 @@ class V1ManageFabricBootstrapGet(_V1ManageFabricBootstrapBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricBootstrapGet"] = Field( default="V1ManageFabricBootstrapGet", description="Class name for backward compatibility" ) diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py index 078afc6c..e4d8e595 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py @@ -36,13 +36,11 @@ BasePath, ) from 
ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - BaseModel, - ConfigDict, Field, ) - -# Common config for basic validation -COMMON_CONFIG = ConfigDict(validate_assignment=True) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) class FabricConfigDeployEndpointParams(EndpointQueryParams): @@ -69,7 +67,7 @@ class FabricConfigDeployEndpointParams(EndpointQueryParams): incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") -class _V1ManageFabricConfigBase(FabricNameMixin, BaseModel): +class _V1ManageFabricConfigBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Config endpoints. @@ -77,8 +75,6 @@ class _V1ManageFabricConfigBase(FabricNameMixin, BaseModel): /api/v1/manage/fabrics/{fabricName} endpoint family. """ - model_config = COMMON_CONFIG - @property def _base_path(self) -> str: """Build the base endpoint path.""" @@ -115,10 +111,6 @@ class V1ManageFabricConfigSavePost(_V1ManageFabricConfigBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricConfigSavePost"] = Field( default="V1ManageFabricConfigSavePost", description="Class name for backward compatibility" ) @@ -177,10 +169,6 @@ class V1ManageFabricConfigDeployPost(_V1ManageFabricConfigBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricConfigDeployPost"] = Field( default="V1ManageFabricConfigDeployPost", description="Class name for backward compatibility" ) @@ -239,10 +227,6 @@ class 
V1ManageFabricGet(_V1ManageFabricConfigBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricGet"] = Field( default="V1ManageFabricGet", description="Class name for backward compatibility" ) @@ -286,10 +270,6 @@ class V1ManageFabricInventoryDiscoverGet(_V1ManageFabricConfigBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricInventoryDiscoverGet"] = Field( default="V1ManageFabricInventoryDiscoverGet", description="Class name for backward compatibility" ) diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py index 928b4b67..471d2d9b 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py @@ -30,16 +30,14 @@ BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - BaseModel, - ConfigDict, Field, ) - -# Common config for basic validation -COMMON_CONFIG = ConfigDict(validate_assignment=True) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) -class _V1ManageFabricDiscoveryBase(FabricNameMixin, BaseModel): +class _V1ManageFabricDiscoveryBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Discovery endpoints. @@ -47,8 +45,6 @@ class _V1ManageFabricDiscoveryBase(FabricNameMixin, BaseModel): /api/v1/manage/fabrics/{fabricName}/actions/shallowDiscovery endpoint. 
""" - model_config = COMMON_CONFIG - @property def _base_path(self) -> str: """Build the base endpoint path.""" @@ -85,10 +81,6 @@ class V1ManageFabricShallowDiscoveryPost(_V1ManageFabricDiscoveryBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricShallowDiscoveryPost"] = Field( default="V1ManageFabricShallowDiscoveryPost", description="Class name for backward compatibility" ) diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py index 6b90f160..86964dfd 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py @@ -29,8 +29,10 @@ from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + ClusterNameMixin, FabricNameMixin, SwitchSerialNumberMixin, + TicketIdMixin, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, @@ -39,13 +41,11 @@ BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - BaseModel, - ConfigDict, Field, ) - -# Common config for basic validation -COMMON_CONFIG = ConfigDict(validate_assignment=True) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) # ============================================================================ @@ -53,7 +53,7 @@ # ============================================================================ -class SwitchActionsRemoveEndpointParams(EndpointQueryParams): +class 
SwitchActionsRemoveEndpointParams(TicketIdMixin, EndpointQueryParams): """ # Summary @@ -74,10 +74,9 @@ class SwitchActionsRemoveEndpointParams(EndpointQueryParams): """ force: Optional[bool] = Field(default=None, description="Force removal of switches") - ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") -class SwitchActionsTicketEndpointParams(EndpointQueryParams): +class SwitchActionsTicketEndpointParams(TicketIdMixin, EndpointQueryParams): """ # Summary @@ -96,10 +95,8 @@ class SwitchActionsTicketEndpointParams(EndpointQueryParams): ``` """ - ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") - -class SwitchActionsImportEndpointParams(EndpointQueryParams): +class SwitchActionsImportEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): """ # Summary @@ -107,8 +104,8 @@ class SwitchActionsImportEndpointParams(EndpointQueryParams): ## Parameters - - cluster_name: Target cluster name for multi-cluster deployments (optional) - - ticket_id: Change control ticket ID (optional) + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) ## Usage @@ -119,16 +116,13 @@ class SwitchActionsImportEndpointParams(EndpointQueryParams): ``` """ - cluster_name: Optional[str] = Field(default=None, min_length=1, description="Target cluster name") - ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") - # ============================================================================ # Switch Actions Endpoints # ============================================================================ -class _V1ManageFabricSwitchActionsBase(FabricNameMixin, BaseModel): +class _V1ManageFabricSwitchActionsBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Switch Actions endpoints. 
@@ -136,8 +130,6 @@ class _V1ManageFabricSwitchActionsBase(FabricNameMixin, BaseModel): /api/v1/manage/fabrics/{fabricName}/switchActions endpoint. """ - model_config = COMMON_CONFIG - @property def _base_path(self) -> str: """Build the base endpoint path.""" @@ -190,10 +182,6 @@ class V1ManageFabricSwitchActionsRemovePost(_V1ManageFabricSwitchActionsBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchActionsRemovePost"] = Field( default="V1ManageFabricSwitchActionsRemovePost", description="Class name for backward compatibility" ) @@ -266,10 +254,6 @@ class V1ManageFabricSwitchActionsChangeRolesPost(_V1ManageFabricSwitchActionsBas ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchActionsChangeRolesPost"] = Field( default="V1ManageFabricSwitchActionsChangeRolesPost", description="Class name for backward compatibility", @@ -345,10 +329,6 @@ class V1ManageFabricSwitchActionsImportBootstrapPost(_V1ManageFabricSwitchAction ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchActionsImportBootstrapPost"] = Field( default="V1ManageFabricSwitchActionsImportBootstrapPost", description="Class name for backward compatibility" ) @@ -431,10 +411,6 @@ class V1ManageFabricSwitchActionsPreProvisionPost(_V1ManageFabricSwitchActionsBa ``` """ - # Version metadata - api_version: 
Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchActionsPreProvisionPost"] = Field( default="V1ManageFabricSwitchActionsPreProvisionPost", description="Class name for backward compatibility", @@ -471,7 +447,7 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class _V1ManageFabricSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, BaseModel): +class _V1ManageFabricSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): """ Base class for per-switch action endpoints. @@ -479,8 +455,6 @@ class _V1ManageFabricSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNum /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions endpoint. """ - model_config = COMMON_CONFIG - @property def _base_path(self) -> str: """Build the base endpoint path.""" @@ -535,10 +509,6 @@ class V1ManageFabricSwitchProvisionRMAPost(_V1ManageFabricSwitchActionsPerSwitch ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchProvisionRMAPost"] = Field( default="V1ManageFabricSwitchProvisionRMAPost", description="Class name for backward compatibility" ) @@ -574,7 +544,7 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class SwitchActionsClusterEndpointParams(EndpointQueryParams): +class SwitchActionsClusterEndpointParams(ClusterNameMixin, EndpointQueryParams): """ # Summary @@ -582,7 +552,7 @@ class SwitchActionsClusterEndpointParams(EndpointQueryParams): ## Parameters - - cluster_name: Target 
cluster name for multi-cluster deployments (optional) + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) ## Usage @@ -593,8 +563,6 @@ class SwitchActionsClusterEndpointParams(EndpointQueryParams): ``` """ - cluster_name: Optional[str] = Field(default=None, min_length=1, description="Target cluster name") - class V1ManageFabricSwitchChangeSerialNumberPost(_V1ManageFabricSwitchActionsPerSwitchBase): """ @@ -640,10 +608,6 @@ class V1ManageFabricSwitchChangeSerialNumberPost(_V1ManageFabricSwitchActionsPer ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchChangeSerialNumberPost"] = Field( default="V1ManageFabricSwitchChangeSerialNumberPost", description="Class name for backward compatibility" ) @@ -721,10 +685,6 @@ class V1ManageFabricSwitchActionsRediscoverPost(_V1ManageFabricSwitchActionsBase ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchActionsRediscoverPost"] = Field( default="V1ManageFabricSwitchActionsRediscoverPost", description="Class name for backward compatibility", diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py index 9594a64c..c9cc7e36 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py @@ -25,8 +25,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum from 
ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + ClusterNameMixin, FabricNameMixin, - SwitchSerialNumberMixin, + FilterMixin, + MaxMixin, + OffsetMixin, + TicketIdMixin, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( EndpointQueryParams, @@ -35,16 +39,14 @@ BasePath, ) from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - BaseModel, - ConfigDict, Field, ) - -# Common config for basic validation -COMMON_CONFIG = ConfigDict(validate_assignment=True) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) -class FabricSwitchesGetEndpointParams(EndpointQueryParams): +class FabricSwitchesGetEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): """ # Summary @@ -53,9 +55,9 @@ class FabricSwitchesGetEndpointParams(EndpointQueryParams): ## Parameters - hostname: Filter by switch hostname (optional) - - max: Maximum number of results (optional) - - offset: Pagination offset (optional) - - filter: Lucene filter expression (optional) + - max: Maximum number of results (optional, from `MaxMixin`) + - offset: Pagination offset (optional, from `OffsetMixin`) + - filter: Lucene filter expression (optional, from `FilterMixin`) ## Usage @@ -67,12 +69,9 @@ class FabricSwitchesGetEndpointParams(EndpointQueryParams): """ hostname: Optional[str] = Field(default=None, min_length=1, description="Filter by switch hostname") - max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") - offset: Optional[int] = Field(default=None, ge=0, description="Pagination offset") - filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") -class FabricSwitchesAddEndpointParams(EndpointQueryParams): +class FabricSwitchesAddEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): """ # Summary @@ -80,8 +79,8 @@ class 
FabricSwitchesAddEndpointParams(EndpointQueryParams): ## Parameters - - cluster_name: Target cluster name for multi-cluster deployments (optional) - - ticket_id: Change control ticket ID (optional) + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) ## Usage @@ -92,11 +91,8 @@ class FabricSwitchesAddEndpointParams(EndpointQueryParams): ``` """ - cluster_name: Optional[str] = Field(default=None, min_length=1, description="Target cluster name") - ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") - -class _V1ManageFabricSwitchesBase(FabricNameMixin, BaseModel): +class _V1ManageFabricSwitchesBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Switches endpoints. @@ -104,8 +100,6 @@ class _V1ManageFabricSwitchesBase(FabricNameMixin, BaseModel): /api/v1/manage/fabrics/{fabricName}/switches endpoint. """ - model_config = COMMON_CONFIG - @property def _base_path(self) -> str: """Build the base endpoint path.""" @@ -160,10 +154,6 @@ class V1ManageFabricSwitchesGet(_V1ManageFabricSwitchesBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchesGet"] = Field( default="V1ManageFabricSwitchesGet", description="Class name for backward compatibility" ) @@ -237,10 +227,6 @@ class V1ManageFabricSwitchesPost(_V1ManageFabricSwitchesBase): ``` """ - # Version metadata - api_version: Literal["v1"] = Field(default="v1", description="ND API version for this endpoint") - min_controller_version: str = Field(default="3.0.0", description="Minimum ND version supporting this endpoint") - class_name: Literal["V1ManageFabricSwitchesPost"] = Field( 
default="V1ManageFabricSwitchesPost", description="Class name for backward compatibility" ) @@ -268,23 +254,3 @@ def path(self) -> str: def verb(self) -> HttpVerbEnum: """Return the HTTP verb for this endpoint.""" return HttpVerbEnum.POST - - -class _V1ManageFabricSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, BaseModel): - """ - Base class for single switch endpoints. - - Provides common functionality for all HTTP methods on the - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn} endpoint. - """ - - model_config = COMMON_CONFIG - - @property - def _base_path(self) -> str: - """Build the base endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - if self.switch_sn is None: - raise ValueError("switch_sn must be set before accessing path") - return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn) From 522f3602e02bf50d7b412e7cd8458a8c5a8b18d7 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 16 Mar 2026 11:33:40 +0530 Subject: [PATCH 012/109] Refactor Endpoints for consistency --- .../manage/nd_manage_switches/credentials.py | 12 +-- .../nd_manage_switches/fabric_bootstrap.py | 12 +-- .../nd_manage_switches/fabric_config.py | 36 ++++----- .../nd_manage_switches/fabric_discovery.py | 10 +-- .../fabric_switch_actions.py | 74 +++++++++---------- .../nd_manage_switches/fabric_switches.py | 22 +++--- plugins/module_utils/nd_switch_resources.py | 40 +++++----- .../nd_manage_switches/bootstrap_utils.py | 4 +- .../utils/nd_manage_switches/fabric_utils.py | 12 +-- .../nd_manage_switches/switch_wait_utils.py | 12 +-- 10 files changed, 117 insertions(+), 117 deletions(-) diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py index 1576e9b7..9ca94d09 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py +++ 
b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py @@ -63,7 +63,7 @@ class CredentialsSwitchesEndpointParams(TicketIdMixin, EndpointQueryParams): """ -class _V1ManageCredentialsSwitchesBase(NDEndpointBaseModel): +class _EpManageCredentialsSwitchesBase(NDEndpointBaseModel): """ Base class for Credentials Switches endpoints. @@ -77,7 +77,7 @@ def _base_path(self) -> str: return BasePath.path("credentials", "switches") -class V1ManageCredentialsSwitchesPost(_V1ManageCredentialsSwitchesBase): +class EpManageCredentialsSwitchesPost(_EpManageCredentialsSwitchesBase): """ # Summary @@ -104,12 +104,12 @@ class V1ManageCredentialsSwitchesPost(_V1ManageCredentialsSwitchesBase): ```python # Create credentials without ticket - request = V1ManageCredentialsSwitchesPost() + request = EpManageCredentialsSwitchesPost() path = request.path verb = request.verb # Create credentials with change control ticket - request = V1ManageCredentialsSwitchesPost() + request = EpManageCredentialsSwitchesPost() request.endpoint_params.ticket_id = "CHG12345" path = request.path verb = request.verb @@ -117,8 +117,8 @@ class V1ManageCredentialsSwitchesPost(_V1ManageCredentialsSwitchesBase): ``` """ - class_name: Literal["V1ManageCredentialsSwitchesPost"] = Field( - default="V1ManageCredentialsSwitchesPost", description="Class name for backward compatibility" + class_name: Literal["EpManageCredentialsSwitchesPost"] = Field( + default="EpManageCredentialsSwitchesPost", description="Class name for backward compatibility" ) endpoint_params: CredentialsSwitchesEndpointParams = Field( default_factory=CredentialsSwitchesEndpointParams, description="Endpoint-specific query parameters" diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py index cced1ce5..25432637 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py +++ 
b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py @@ -65,7 +65,7 @@ class FabricBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, Endpoint """ -class _V1ManageFabricBootstrapBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricBootstrapBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Bootstrap endpoints. @@ -81,7 +81,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name, "bootstrap") -class V1ManageFabricBootstrapGet(_V1ManageFabricBootstrapBase): +class EpManageFabricBootstrapGet(_EpManageFabricBootstrapBase): """ # Summary @@ -110,13 +110,13 @@ class V1ManageFabricBootstrapGet(_V1ManageFabricBootstrapBase): ```python # List all bootstrap switches - request = V1ManageFabricBootstrapGet() + request = EpManageFabricBootstrapGet() request.fabric_name = "MyFabric" path = request.path verb = request.verb # List with pagination - request = V1ManageFabricBootstrapGet() + request = EpManageFabricBootstrapGet() request.fabric_name = "MyFabric" request.endpoint_params.max = 50 request.endpoint_params.offset = 0 @@ -126,8 +126,8 @@ class V1ManageFabricBootstrapGet(_V1ManageFabricBootstrapBase): ``` """ - class_name: Literal["V1ManageFabricBootstrapGet"] = Field( - default="V1ManageFabricBootstrapGet", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricBootstrapGet"] = Field( + default="EpManageFabricBootstrapGet", description="Class name for backward compatibility" ) endpoint_params: FabricBootstrapEndpointParams = Field( default_factory=FabricBootstrapEndpointParams, description="Endpoint-specific query parameters" diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py index e4d8e595..5ab75028 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py +++ 
b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py @@ -67,7 +67,7 @@ class FabricConfigDeployEndpointParams(EndpointQueryParams): incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") -class _V1ManageFabricConfigBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricConfigBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Config endpoints. @@ -83,7 +83,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name) -class V1ManageFabricConfigSavePost(_V1ManageFabricConfigBase): +class EpManageFabricConfigSavePost(_EpManageFabricConfigBase): """ # Summary @@ -104,15 +104,15 @@ class V1ManageFabricConfigSavePost(_V1ManageFabricConfigBase): ## Usage ```python - request = V1ManageFabricConfigSavePost() + request = EpManageFabricConfigSavePost() request.fabric_name = "MyFabric" path = request.path verb = request.verb ``` """ - class_name: Literal["V1ManageFabricConfigSavePost"] = Field( - default="V1ManageFabricConfigSavePost", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricConfigSavePost"] = Field( + default="EpManageFabricConfigSavePost", description="Class name for backward compatibility" ) @property @@ -126,7 +126,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricConfigDeployPost(_V1ManageFabricConfigBase): +class EpManageFabricConfigDeployPost(_EpManageFabricConfigBase): """ # Summary @@ -154,13 +154,13 @@ class V1ManageFabricConfigDeployPost(_V1ManageFabricConfigBase): ```python # Deploy with defaults - request = V1ManageFabricConfigDeployPost() + request = EpManageFabricConfigDeployPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Deploy forcing show run - request = V1ManageFabricConfigDeployPost() + request = EpManageFabricConfigDeployPost() request.fabric_name = "MyFabric" request.endpoint_params.force_show_run = True path = 
request.path @@ -169,8 +169,8 @@ class V1ManageFabricConfigDeployPost(_V1ManageFabricConfigBase): ``` """ - class_name: Literal["V1ManageFabricConfigDeployPost"] = Field( - default="V1ManageFabricConfigDeployPost", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricConfigDeployPost"] = Field( + default="EpManageFabricConfigDeployPost", description="Class name for backward compatibility" ) endpoint_params: FabricConfigDeployEndpointParams = Field( default_factory=FabricConfigDeployEndpointParams, description="Endpoint-specific query parameters" @@ -199,7 +199,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricGet(_V1ManageFabricConfigBase): +class EpManageFabricGet(_EpManageFabricConfigBase): """ # Summary @@ -220,15 +220,15 @@ class V1ManageFabricGet(_V1ManageFabricConfigBase): ## Usage ```python - request = V1ManageFabricGet() + request = EpManageFabricGet() request.fabric_name = "MyFabric" path = request.path verb = request.verb ``` """ - class_name: Literal["V1ManageFabricGet"] = Field( - default="V1ManageFabricGet", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricGet"] = Field( + default="EpManageFabricGet", description="Class name for backward compatibility" ) @property @@ -242,7 +242,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.GET -class V1ManageFabricInventoryDiscoverGet(_V1ManageFabricConfigBase): +class EpManageFabricInventoryDiscoverGet(_EpManageFabricConfigBase): """ # Summary @@ -263,15 +263,15 @@ class V1ManageFabricInventoryDiscoverGet(_V1ManageFabricConfigBase): ## Usage ```python - request = V1ManageFabricInventoryDiscoverGet() + request = EpManageFabricInventoryDiscoverGet() request.fabric_name = "MyFabric" path = request.path verb = request.verb ``` """ - class_name: Literal["V1ManageFabricInventoryDiscoverGet"] = Field( - default="V1ManageFabricInventoryDiscoverGet", description="Class name for backward compatibility" + 
class_name: Literal["EpManageFabricInventoryDiscoverGet"] = Field( + default="EpManageFabricInventoryDiscoverGet", description="Class name for backward compatibility" ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py index 471d2d9b..e2416f98 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py @@ -37,7 +37,7 @@ ) -class _V1ManageFabricDiscoveryBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricDiscoveryBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Discovery endpoints. @@ -53,7 +53,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name, "actions", "shallowDiscovery") -class V1ManageFabricShallowDiscoveryPost(_V1ManageFabricDiscoveryBase): +class EpManageFabricShallowDiscoveryPost(_EpManageFabricDiscoveryBase): """ # Summary @@ -74,15 +74,15 @@ class V1ManageFabricShallowDiscoveryPost(_V1ManageFabricDiscoveryBase): ## Usage ```python - request = V1ManageFabricShallowDiscoveryPost() + request = EpManageFabricShallowDiscoveryPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb ``` """ - class_name: Literal["V1ManageFabricShallowDiscoveryPost"] = Field( - default="V1ManageFabricShallowDiscoveryPost", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricShallowDiscoveryPost"] = Field( + default="EpManageFabricShallowDiscoveryPost", description="Class name for backward compatibility" ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py index 86964dfd..40ea5808 100644 --- 
a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py @@ -122,7 +122,7 @@ class SwitchActionsImportEndpointParams(ClusterNameMixin, TicketIdMixin, Endpoin # ============================================================================ -class _V1ManageFabricSwitchActionsBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricSwitchActionsBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Switch Actions endpoints. @@ -138,7 +138,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name, "switchActions") -class V1ManageFabricSwitchActionsRemovePost(_V1ManageFabricSwitchActionsBase): +class EpManageFabricSwitchActionsRemovePost(_EpManageFabricSwitchActionsBase): """ # Summary @@ -166,13 +166,13 @@ class V1ManageFabricSwitchActionsRemovePost(_V1ManageFabricSwitchActionsBase): ```python # Remove switches - request = V1ManageFabricSwitchActionsRemovePost() + request = EpManageFabricSwitchActionsRemovePost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Remove switches with force and ticket - request = V1ManageFabricSwitchActionsRemovePost() + request = EpManageFabricSwitchActionsRemovePost() request.fabric_name = "MyFabric" request.endpoint_params.force = True request.endpoint_params.ticket_id = "CHG12345" @@ -182,8 +182,8 @@ class V1ManageFabricSwitchActionsRemovePost(_V1ManageFabricSwitchActionsBase): ``` """ - class_name: Literal["V1ManageFabricSwitchActionsRemovePost"] = Field( - default="V1ManageFabricSwitchActionsRemovePost", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricSwitchActionsRemovePost"] = Field( + default="EpManageFabricSwitchActionsRemovePost", description="Class name for backward compatibility" ) endpoint_params: SwitchActionsRemoveEndpointParams = Field( default_factory=SwitchActionsRemoveEndpointParams, 
description="Endpoint-specific query parameters" @@ -212,7 +212,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricSwitchActionsChangeRolesPost(_V1ManageFabricSwitchActionsBase): +class EpManageFabricSwitchActionsChangeRolesPost(_EpManageFabricSwitchActionsBase): """ # Summary @@ -239,13 +239,13 @@ class V1ManageFabricSwitchActionsChangeRolesPost(_V1ManageFabricSwitchActionsBas ```python # Change roles - request = V1ManageFabricSwitchActionsChangeRolesPost() + request = EpManageFabricSwitchActionsChangeRolesPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Change roles with change control ticket - request = V1ManageFabricSwitchActionsChangeRolesPost() + request = EpManageFabricSwitchActionsChangeRolesPost() request.fabric_name = "MyFabric" request.endpoint_params.ticket_id = "CHG12345" path = request.path @@ -254,8 +254,8 @@ class V1ManageFabricSwitchActionsChangeRolesPost(_V1ManageFabricSwitchActionsBas ``` """ - class_name: Literal["V1ManageFabricSwitchActionsChangeRolesPost"] = Field( - default="V1ManageFabricSwitchActionsChangeRolesPost", + class_name: Literal["EpManageFabricSwitchActionsChangeRolesPost"] = Field( + default="EpManageFabricSwitchActionsChangeRolesPost", description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( @@ -285,7 +285,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class V1ManageFabricSwitchActionsImportBootstrapPost(_V1ManageFabricSwitchActionsBase): +class EpManageFabricSwitchActionsImportBootstrapPost(_EpManageFabricSwitchActionsBase): """ # Summary @@ -313,13 +313,13 @@ class V1ManageFabricSwitchActionsImportBootstrapPost(_V1ManageFabricSwitchAction ```python # Import bootstrap switches - request = V1ManageFabricSwitchActionsImportBootstrapPost() + request = EpManageFabricSwitchActionsImportBootstrapPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Import with cluster 
and ticket - request = V1ManageFabricSwitchActionsImportBootstrapPost() + request = EpManageFabricSwitchActionsImportBootstrapPost() request.fabric_name = "MyFabric" request.endpoint_params.cluster_name = "cluster1" request.endpoint_params.ticket_id = "CHG12345" @@ -329,8 +329,8 @@ class V1ManageFabricSwitchActionsImportBootstrapPost(_V1ManageFabricSwitchAction ``` """ - class_name: Literal["V1ManageFabricSwitchActionsImportBootstrapPost"] = Field( - default="V1ManageFabricSwitchActionsImportBootstrapPost", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricSwitchActionsImportBootstrapPost"] = Field( + default="EpManageFabricSwitchActionsImportBootstrapPost", description="Class name for backward compatibility" ) endpoint_params: SwitchActionsImportEndpointParams = Field( default_factory=SwitchActionsImportEndpointParams, description="Endpoint-specific query parameters" @@ -364,7 +364,7 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class V1ManageFabricSwitchActionsPreProvisionPost(_V1ManageFabricSwitchActionsBase): +class EpManageFabricSwitchActionsPreProvisionPost(_EpManageFabricSwitchActionsBase): """ # Summary @@ -395,13 +395,13 @@ class V1ManageFabricSwitchActionsPreProvisionPost(_V1ManageFabricSwitchActionsBa ```python # Pre-provision switches - request = V1ManageFabricSwitchActionsPreProvisionPost() + request = EpManageFabricSwitchActionsPreProvisionPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Pre-provision with cluster and ticket - request = V1ManageFabricSwitchActionsPreProvisionPost() + request = EpManageFabricSwitchActionsPreProvisionPost() request.fabric_name = "MyFabric" request.endpoint_params.cluster_name = "cluster1" request.endpoint_params.ticket_id = "CHG12345" @@ -411,8 +411,8 @@ class V1ManageFabricSwitchActionsPreProvisionPost(_V1ManageFabricSwitchActionsBa ``` """ - class_name: 
Literal["V1ManageFabricSwitchActionsPreProvisionPost"] = Field( - default="V1ManageFabricSwitchActionsPreProvisionPost", + class_name: Literal["EpManageFabricSwitchActionsPreProvisionPost"] = Field( + default="EpManageFabricSwitchActionsPreProvisionPost", description="Class name for backward compatibility", ) endpoint_params: SwitchActionsImportEndpointParams = Field( @@ -447,7 +447,7 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class _V1ManageFabricSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): +class _EpManageFabricSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): """ Base class for per-switch action endpoints. @@ -465,7 +465,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn, "actions") -class V1ManageFabricSwitchProvisionRMAPost(_V1ManageFabricSwitchActionsPerSwitchBase): +class EpManageFabricSwitchProvisionRMAPost(_EpManageFabricSwitchActionsPerSwitchBase): """ # Summary @@ -492,14 +492,14 @@ class V1ManageFabricSwitchProvisionRMAPost(_V1ManageFabricSwitchActionsPerSwitch ```python # Provision RMA - request = V1ManageFabricSwitchProvisionRMAPost() + request = EpManageFabricSwitchProvisionRMAPost() request.fabric_name = "MyFabric" request.switch_sn = "SAL1948TRTT" path = request.path verb = request.verb # Provision RMA with change control ticket - request = V1ManageFabricSwitchProvisionRMAPost() + request = EpManageFabricSwitchProvisionRMAPost() request.fabric_name = "MyFabric" request.switch_sn = "SAL1948TRTT" request.endpoint_params.ticket_id = "CHG12345" @@ -509,8 +509,8 @@ class V1ManageFabricSwitchProvisionRMAPost(_V1ManageFabricSwitchActionsPerSwitch ``` """ - class_name: Literal["V1ManageFabricSwitchProvisionRMAPost"] = Field( - default="V1ManageFabricSwitchProvisionRMAPost", description="Class name for backward compatibility" + class_name: 
Literal["EpManageFabricSwitchProvisionRMAPost"] = Field( + default="EpManageFabricSwitchProvisionRMAPost", description="Class name for backward compatibility" ) endpoint_params: SwitchActionsTicketEndpointParams = Field( default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" @@ -564,7 +564,7 @@ class SwitchActionsClusterEndpointParams(ClusterNameMixin, EndpointQueryParams): """ -class V1ManageFabricSwitchChangeSerialNumberPost(_V1ManageFabricSwitchActionsPerSwitchBase): +class EpManageFabricSwitchChangeSerialNumberPost(_EpManageFabricSwitchActionsPerSwitchBase): """ # Summary @@ -591,14 +591,14 @@ class V1ManageFabricSwitchChangeSerialNumberPost(_V1ManageFabricSwitchActionsPer ```python # Change serial number - request = V1ManageFabricSwitchChangeSerialNumberPost() + request = EpManageFabricSwitchChangeSerialNumberPost() request.fabric_name = "MyFabric" request.switch_sn = "SAL1948TRTT" path = request.path verb = request.verb # Change serial number with cluster name - request = V1ManageFabricSwitchChangeSerialNumberPost() + request = EpManageFabricSwitchChangeSerialNumberPost() request.fabric_name = "MyFabric" request.switch_sn = "SAL1948TRTT" request.endpoint_params.cluster_name = "cluster1" @@ -608,8 +608,8 @@ class V1ManageFabricSwitchChangeSerialNumberPost(_V1ManageFabricSwitchActionsPer ``` """ - class_name: Literal["V1ManageFabricSwitchChangeSerialNumberPost"] = Field( - default="V1ManageFabricSwitchChangeSerialNumberPost", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricSwitchChangeSerialNumberPost"] = Field( + default="EpManageFabricSwitchChangeSerialNumberPost", description="Class name for backward compatibility" ) endpoint_params: SwitchActionsClusterEndpointParams = Field( default_factory=SwitchActionsClusterEndpointParams, description="Endpoint-specific query parameters" @@ -643,7 +643,7 @@ def verb(self) -> HttpVerbEnum: # 
============================================================================ -class V1ManageFabricSwitchActionsRediscoverPost(_V1ManageFabricSwitchActionsBase): +class EpManageFabricSwitchActionsRediscoverPost(_EpManageFabricSwitchActionsBase): """ # Summary @@ -670,13 +670,13 @@ class V1ManageFabricSwitchActionsRediscoverPost(_V1ManageFabricSwitchActionsBase ```python # Rediscover switches - request = V1ManageFabricSwitchActionsRediscoverPost() + request = EpManageFabricSwitchActionsRediscoverPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Rediscover switches with change control ticket - request = V1ManageFabricSwitchActionsRediscoverPost() + request = EpManageFabricSwitchActionsRediscoverPost() request.fabric_name = "MyFabric" request.endpoint_params.ticket_id = "CHG12345" path = request.path @@ -685,8 +685,8 @@ class V1ManageFabricSwitchActionsRediscoverPost(_V1ManageFabricSwitchActionsBase ``` """ - class_name: Literal["V1ManageFabricSwitchActionsRediscoverPost"] = Field( - default="V1ManageFabricSwitchActionsRediscoverPost", + class_name: Literal["EpManageFabricSwitchActionsRediscoverPost"] = Field( + default="EpManageFabricSwitchActionsRediscoverPost", description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py index c9cc7e36..2334e98c 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py @@ -92,7 +92,7 @@ class FabricSwitchesAddEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQ """ -class _V1ManageFabricSwitchesBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricSwitchesBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Switches endpoints. 
@@ -108,7 +108,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name, "switches") -class V1ManageFabricSwitchesGet(_V1ManageFabricSwitchesBase): +class EpManageFabricSwitchesGet(_EpManageFabricSwitchesBase): """ # Summary @@ -138,13 +138,13 @@ class V1ManageFabricSwitchesGet(_V1ManageFabricSwitchesBase): ```python # List all switches - request = V1ManageFabricSwitchesGet() + request = EpManageFabricSwitchesGet() request.fabric_name = "MyFabric" path = request.path verb = request.verb # List with filtering - request = V1ManageFabricSwitchesGet() + request = EpManageFabricSwitchesGet() request.fabric_name = "MyFabric" request.endpoint_params.hostname = "leaf1" request.endpoint_params.max = 100 @@ -154,8 +154,8 @@ class V1ManageFabricSwitchesGet(_V1ManageFabricSwitchesBase): ``` """ - class_name: Literal["V1ManageFabricSwitchesGet"] = Field( - default="V1ManageFabricSwitchesGet", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricSwitchesGet"] = Field( + default="EpManageFabricSwitchesGet", description="Class name for backward compatibility" ) endpoint_params: FabricSwitchesGetEndpointParams = Field( default_factory=FabricSwitchesGetEndpointParams, description="Endpoint-specific query parameters" @@ -183,7 +183,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.GET -class V1ManageFabricSwitchesPost(_V1ManageFabricSwitchesBase): +class EpManageFabricSwitchesPost(_EpManageFabricSwitchesBase): """ # Summary @@ -211,13 +211,13 @@ class V1ManageFabricSwitchesPost(_V1ManageFabricSwitchesBase): ```python # Add switches - request = V1ManageFabricSwitchesPost() + request = EpManageFabricSwitchesPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Add switches with cluster and ticket - request = V1ManageFabricSwitchesPost() + request = EpManageFabricSwitchesPost() request.fabric_name = "MyFabric" request.endpoint_params.cluster_name = "cluster1" 
request.endpoint_params.ticket_id = "CHG12345" @@ -227,8 +227,8 @@ class V1ManageFabricSwitchesPost(_V1ManageFabricSwitchesBase): ``` """ - class_name: Literal["V1ManageFabricSwitchesPost"] = Field( - default="V1ManageFabricSwitchesPost", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricSwitchesPost"] = Field( + default="EpManageFabricSwitchesPost", description="Class name for backward compatibility" ) endpoint_params: FabricSwitchesAddEndpointParams = Field( default_factory=FabricSwitchesAddEndpointParams, description="Endpoint-specific query parameters" diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 1b67e18a..e8ecf087 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -58,22 +58,22 @@ build_poap_data_block, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switches import ( - V1ManageFabricSwitchesGet, - V1ManageFabricSwitchesPost, + EpManageFabricSwitchesGet, + EpManageFabricSwitchesPost, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_discovery import ( - V1ManageFabricShallowDiscoveryPost, + EpManageFabricShallowDiscoveryPost, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switch_actions import ( - V1ManageFabricSwitchProvisionRMAPost, - V1ManageFabricSwitchActionsImportBootstrapPost, - V1ManageFabricSwitchActionsPreProvisionPost, - V1ManageFabricSwitchActionsRemovePost, - V1ManageFabricSwitchActionsChangeRolesPost, - V1ManageFabricSwitchChangeSerialNumberPost, + EpManageFabricSwitchProvisionRMAPost, + EpManageFabricSwitchActionsImportBootstrapPost, + EpManageFabricSwitchActionsPreProvisionPost, + EpManageFabricSwitchActionsRemovePost, + EpManageFabricSwitchActionsChangeRolesPost, + EpManageFabricSwitchChangeSerialNumberPost, ) from 
ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.credentials import ( - V1ManageCredentialsSwitchesPost, + EpManageCredentialsSwitchesPost, ) @@ -429,7 +429,7 @@ def bulk_discover( log.debug("ENTER: bulk_discover()") log.debug(f"Discovering {len(switches)} switches in bulk") - endpoint = V1ManageFabricShallowDiscoveryPost() + endpoint = EpManageFabricShallowDiscoveryPost() endpoint.fabric_name = self.ctx.fabric seed_ips = [switch.seed_ip for switch in switches] @@ -641,7 +641,7 @@ def bulk_add( log.debug("ENTER: bulk_add()") log.debug(f"Adding {len(switches)} switches to fabric") - endpoint = V1ManageFabricSwitchesPost() + endpoint = EpManageFabricSwitchesPost() endpoint.fabric_name = self.ctx.fabric switch_discoveries = [] @@ -768,7 +768,7 @@ def bulk_delete( log.debug("EXIT: bulk_delete() - nothing to delete") return [] - endpoint = V1ManageFabricSwitchActionsRemovePost() + endpoint = EpManageFabricSwitchActionsRemovePost() endpoint.fabric_name = self.ctx.fabric payload = {"switchIds": serial_numbers} @@ -831,7 +831,7 @@ def bulk_save_credentials( log.debug("EXIT: bulk_save_credentials() - no credentials to save") return - endpoint = V1ManageCredentialsSwitchesPost() + endpoint = EpManageCredentialsSwitchesPost() for (username, password), serial_numbers in cred_groups.items(): creds_request = SwitchCredentialsRequestModel( @@ -904,7 +904,7 @@ def bulk_update_roles( log.debug("EXIT: bulk_update_roles() - no roles to update") return - endpoint = V1ManageFabricSwitchActionsChangeRolesPost() + endpoint = EpManageFabricSwitchActionsChangeRolesPost() endpoint.fabric_name = self.ctx.fabric payload = {"switchRoles": switch_roles} @@ -1412,7 +1412,7 @@ def _import_bootstrap_switches( log.debug("ENTER: _import_bootstrap_switches()") - endpoint = V1ManageFabricSwitchActionsImportBootstrapPost() + endpoint = EpManageFabricSwitchActionsImportBootstrapPost() endpoint.fabric_name = self.ctx.fabric request_model = 
ImportBootstrapSwitchesRequestModel(switches=models) @@ -1532,7 +1532,7 @@ def _preprovision_switches( log.debug("ENTER: _preprovision_switches()") - endpoint = V1ManageFabricSwitchActionsPreProvisionPost() + endpoint = EpManageFabricSwitchActionsPreProvisionPost() endpoint.fabric_name = self.ctx.fabric request_model = PreProvisionSwitchesRequestModel(switches=models) @@ -1665,7 +1665,7 @@ def _handle_poap_swap( f"{old_serial} → {new_serial}" ) - endpoint = V1ManageFabricSwitchChangeSerialNumberPost() + endpoint = EpManageFabricSwitchChangeSerialNumberPost() endpoint.fabric_name = fabric endpoint.switch_sn = old_serial @@ -2083,7 +2083,7 @@ def _provision_rma_switch( log.debug("ENTER: _provision_rma_switch()") - endpoint = V1ManageFabricSwitchProvisionRMAPost() + endpoint = EpManageFabricSwitchProvisionRMAPost() endpoint.fabric_name = self.ctx.fabric endpoint.switch_id = old_switch_id @@ -2824,7 +2824,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: Returns: List of raw switch dictionaries returned by the controller. 
""" - endpoint = V1ManageFabricSwitchesGet() + endpoint = EpManageFabricSwitchesGet() endpoint.fabric_name = self.fabric self.log.debug(f"Querying all switches with endpoint: {endpoint.path}") self.log.debug(f"Query verb: {endpoint.verb}") diff --git a/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py b/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py index 89904696..b3e58c57 100644 --- a/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py +++ b/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py @@ -15,7 +15,7 @@ from typing import Any, Dict, List, Optional from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_bootstrap import ( - V1ManageFabricBootstrapGet, + EpManageFabricBootstrapGet, ) @@ -36,7 +36,7 @@ def query_bootstrap_switches( """ log.debug("ENTER: query_bootstrap_switches()") - endpoint = V1ManageFabricBootstrapGet() + endpoint = EpManageFabricBootstrapGet() endpoint.fabric_name = fabric log.debug(f"Bootstrap endpoint: {endpoint.path}") diff --git a/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py b/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py index 1fb8da7e..244f2b46 100644 --- a/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py +++ b/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py @@ -15,9 +15,9 @@ from typing import Any, Dict, Optional from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_config import ( - V1ManageFabricConfigDeployPost, - V1ManageFabricConfigSavePost, - V1ManageFabricGet, + EpManageFabricConfigDeployPost, + EpManageFabricConfigSavePost, + EpManageFabricGet, ) from .exceptions import SwitchOperationError @@ -44,13 +44,13 @@ def __init__( self.log = logger or logging.getLogger("nd.FabricUtils") # Pre-configure endpoints - self.ep_config_save = V1ManageFabricConfigSavePost() + self.ep_config_save = EpManageFabricConfigSavePost() 
self.ep_config_save.fabric_name = fabric - self.ep_config_deploy = V1ManageFabricConfigDeployPost() + self.ep_config_deploy = EpManageFabricConfigDeployPost() self.ep_config_deploy.fabric_name = fabric - self.ep_fabric_get = V1ManageFabricGet() + self.ep_fabric_get = EpManageFabricGet() self.ep_fabric_get.fabric_name = fabric # ----------------------------------------------------------------- diff --git a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py b/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py index 31665ee7..bb81a673 100644 --- a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py +++ b/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py @@ -15,13 +15,13 @@ from typing import Any, Dict, List, Optional from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_config import ( - V1ManageFabricInventoryDiscoverGet, + EpManageFabricInventoryDiscoverGet, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switches import ( - V1ManageFabricSwitchesGet, + EpManageFabricSwitchesGet, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switch_actions import ( - V1ManageFabricSwitchActionsRediscoverPost, + EpManageFabricSwitchActionsRediscoverPost, ) from .fabric_utils import FabricUtils @@ -94,13 +94,13 @@ def __init__( ) # Pre-configure endpoints - self.ep_switches_get = V1ManageFabricSwitchesGet() + self.ep_switches_get = EpManageFabricSwitchesGet() self.ep_switches_get.fabric_name = fabric - self.ep_inventory_discover = V1ManageFabricInventoryDiscoverGet() + self.ep_inventory_discover = EpManageFabricInventoryDiscoverGet() self.ep_inventory_discover.fabric_name = fabric - self.ep_rediscover = V1ManageFabricSwitchActionsRediscoverPost() + self.ep_rediscover = EpManageFabricSwitchActionsRediscoverPost() self.ep_rediscover.fabric_name = fabric # Cached 
greenfield flag From 095e474cef761379f58b2383fe4c80f0e7daf5d5 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 16 Mar 2026 14:01:13 +0530 Subject: [PATCH 013/109] Add NDOutput for displaying the result --- .../nd_manage_switches/config_models.py | 14 ++ .../nd_manage_switches/switch_data_models.py | 22 +++ plugins/module_utils/nd_switch_resources.py | 135 +++++++++++++++--- 3 files changed, 149 insertions(+), 22 deletions(-) diff --git a/plugins/module_utils/models/nd_manage_switches/config_models.py b/plugins/module_utils/models/nd_manage_switches/config_models.py index eb912e7e..267598c2 100644 --- a/plugins/module_utils/models/nd_manage_switches/config_models.py +++ b/plugins/module_utils/models/nd_manage_switches/config_models.py @@ -448,6 +448,20 @@ def operation_type(self) -> Literal["normal", "poap", "rma"]: description="Software version from inventory API" ) + def to_config_dict(self) -> Dict[str, Any]: + """Return the playbook config as a dict with all credentials stripped. + + Returns: + Dict of config fields with ``user_name``, ``password``, + ``discovery_username``, and ``discovery_password`` excluded. 
+ """ + return self.to_config(exclude={ + "user_name": True, + "password": True, + "poap": {"__all__": {"discovery_username": True, "discovery_password": True}}, + "rma": {"__all__": {"discovery_username": True, "discovery_password": True}}, + }) + @model_validator(mode='before') @classmethod def reject_auth_proto_for_poap_rma(cls, data: Any) -> Any: diff --git a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py index 08147ce4..ccfb571f 100644 --- a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py @@ -472,6 +472,28 @@ def from_response(cls, response: Dict[str, Any]) -> Self: return cls.model_validate(transformed) + def to_config_dict(self) -> Dict[str, Any]: + """Return this inventory record using the 7 standard user-facing fields. + + Produces a consistent dict for previous/current output keys. All 7 + fields are always present (None when not available). Credential fields + are never included. + + Returns: + Dict with keys: seed_ip, serial_number, hostname, model, + role, software_version, mode. 
+ """ + ad = self.additional_data + return { + "seed_ip": self.fabric_management_ip or self.switch_id or "", + "serial_number": self.serial_number, + "hostname": self.hostname, + "model": self.model, + "role": self.switch_role, + "software_version": self.software_version, + "mode": (ad.system_mode if ad and hasattr(ad, "system_mode") else None), + } + __all__ = [ "TelemetryIpCollection", diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index e8ecf087..342b7dc1 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -23,6 +23,8 @@ from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType +from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput +from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches import ( SwitchRole, @@ -85,6 +87,43 @@ _DISCOVERY_MAX_HOPS: int = 0 +# ========================================================================= +# Output Collections +# ========================================================================= + +class SwitchOutputCollection(NDConfigCollection): + """Output collection for all output keys (previous, current, proposed, diff). + + Accepts ``SwitchDataModel``, ``SwitchConfigModel``, or ``_DiffRecord`` items + and serializes them via ``to_config_dict()``. + """ + + def __init__(self, model_class=None, items: Optional[List] = None): + # Store directly — skip add() type guard to support mixed-type diffs. 
+ self._model_class = model_class + self._items: List = list(items) if items else [] + self._index: Dict = {} + + def to_ansible_config(self, **kwargs) -> List[Dict]: + return [item.to_config_dict() for item in self._items] + + def copy(self) -> "SwitchOutputCollection": + return SwitchOutputCollection( + model_class=self._model_class, + items=deepcopy(list(self._items)), + ) + + +@dataclass +class _DiffRecord: + """Wraps a plain dict as a diff entry, exposing ``to_config_dict()``.""" + + data: Dict[str, Any] + + def to_config_dict(self) -> Dict[str, Any]: + return self.data + + @dataclass class SwitchServiceContext: """Store shared dependencies used by service classes. @@ -2177,12 +2216,12 @@ def __init__( # Switch collections try: - self.proposed: List[SwitchDataModel] = [] - self.existing: List[SwitchDataModel] = [ - SwitchDataModel.model_validate(sw) - for sw in self._query_all_switches() - ] - self.previous: List[SwitchDataModel] = deepcopy(self.existing) + self.proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) + self.existing: SwitchOutputCollection = SwitchOutputCollection.from_api_response( + response_data=self._query_all_switches(), + model_class=SwitchDataModel, + ) + self.previous: SwitchOutputCollection = self.existing.copy() except Exception as e: msg = ( f"Failed to query fabric '{self.fabric}' inventory " @@ -2194,6 +2233,11 @@ def __init__( # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] + # Output tracking — NDOutput serializes all collections via their + # overridden to_ansible_config() methods. 
+ self.output = NDOutput(output_level=self.module.params.get("output_level", "normal")) + self.output.assign(before=self.previous, after=self.existing) + # Utility instances (SwitchWaitUtils / FabricUtils depend on self) self.fabric_utils = FabricUtils(self.nd, self.fabric, log) self.wait_utils = SwitchWaitUtils( @@ -2220,17 +2264,26 @@ def exit_json(self) -> None: self.results.build_final_result() final = self.results.final_result - final["logs"] = self.nd_logs - final["previous"] = ( - [sw.model_dump(by_alias=True) for sw in self.previous] - if self.previous - else [] - ) - final["current"] = ( - [sw.model_dump(by_alias=True) for sw in self.existing] - if self.existing - else [] - ) + # NDOutput owns serialization of before/after/proposed via their + # overridden to_ansible_config() methods. We only set _changed + # manually because self.existing is not re-queried after mutations + # so the auto-diff in NDOutput.format() would see no change. + self.output._changed = bool(final.get("changed", False)) + formatted = self.output.format() + + output_level = formatted["output_level"] + + # Rename before/after to previous/current for backward compatibility. + final["previous"] = formatted.pop("before", []) + final["current"] = formatted.pop("after", []) + final["output_level"] = output_level + final["diff"] = formatted.get("diff", []) + + if output_level in ("info", "debug"): + final["proposed"] = formatted.get("proposed", []) + if output_level == "debug": + # Override NDOutput's placeholder with real operation logs. 
+ final["logs"] = self.nd_logs if True in self.results.failed: self.nd.module.fail_json(**final) @@ -2259,6 +2312,12 @@ def manage_state(self) -> None: if self.config else None ) + if proposed_config: + self.output.assign( + proposed=SwitchOutputCollection( + model_class=SwitchConfigModel, items=proposed_config + ) + ) if self.state == "deleted": return self._handle_deleted_state(proposed_config) return self._handle_query_state(proposed_config) @@ -2272,21 +2331,28 @@ def manage_state(self) -> None: proposed_config = SwitchDiffEngine.validate_configs( self.config, self.state, self.nd, self.log ) + # Register proposed config (credentials excluded via SwitchOutputCollection) + self.output.assign( + proposed=SwitchOutputCollection( + model_class=SwitchConfigModel, items=proposed_config + ) + ) self.operation_type = proposed_config[0].operation_type # POAP and RMA bypass normal discovery — delegate to handlers if self.operation_type == "poap": - return self.poap_handler.handle(proposed_config, self.existing) + return self.poap_handler.handle(proposed_config, list(self.existing)) if self.operation_type == "rma": - return self.rma_handler.handle(proposed_config, self.existing) + return self.rma_handler.handle(proposed_config, list(self.existing)) # Normal: discover → build proposed models → compute diff → delegate discovered_data = self.discovery.discover(proposed_config) - self.proposed = self.discovery.build_proposed( - proposed_config, discovered_data, self.existing + built = self.discovery.build_proposed( + proposed_config, discovered_data, list(self.existing) ) + self.proposed = NDConfigCollection(model_class=SwitchDataModel, items=built) diff = SwitchDiffEngine.compute_changes( - self.proposed, self.existing, self.log + list(self.proposed), list(self.existing), self.log ) state_handlers = { @@ -2436,6 +2502,7 @@ def _handle_merged_state( # Collect (serial_number, SwitchConfigModel) pairs for post-processing switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + 
diff_items: List = [] # Phase 4: Bulk add new switches to fabric if switches_to_add and discovered_data: @@ -2479,6 +2546,17 @@ def _handle_merged_state( sn = disc.get("serialNumber") if sn: switch_actions.append((sn, cfg)) + # Discovery response has softwareVersion, hostname, + # model — richer than SwitchConfigModel fields. + diff_items.append(_DiffRecord({ + "seed_ip": cfg.seed_ip, + "serial_number": sn, + "hostname": disc.get("hostname"), + "model": disc.get("model"), + "role": cfg.role, + "software_version": disc.get("softwareVersion"), + "mode": None, + })) self._log_operation("add", cfg.seed_ip) # Phase 5: Collect migration switches for post-processing @@ -2487,6 +2565,9 @@ def _handle_merged_state( cfg = config_by_ip.get(mig_sw.fabric_management_ip) if cfg and mig_sw.switch_id: switch_actions.append((mig_sw.switch_id, cfg)) + # mig_sw is a SwitchDataModel — has all 7 fields including + # software_version and mode from the inventory API. + diff_items.append(mig_sw) self._log_operation("migrate", mig_sw.fabric_management_ip) if not switch_actions: @@ -2514,6 +2595,7 @@ def _handle_merged_state( all_preserve_config=all_preserve_config, update_roles=True, ) + self.output.assign(diff=SwitchOutputCollection(items=diff_items)) self.log.debug("EXIT: _handle_merged_state() - completed") @@ -2565,10 +2647,17 @@ def _merged_handle_role_changes( # Build (switch_id, SwitchConfigModel) pairs and apply role change role_actions: List[Tuple[str, SwitchConfigModel]] = [] + role_diff_items: List = [] for sw in role_change_switches: cfg = config_by_ip.get(sw.fabric_management_ip) if cfg and sw.switch_id: role_actions.append((sw.switch_id, cfg)) + # Use existing SwitchDataModel for software_version + mode; + # override role with the desired value from the playbook. 
+ record = sw.to_config_dict() + if cfg.role is not None: + record["role"] = cfg.role + role_diff_items.append(_DiffRecord(record)) if role_actions: self.log.info( @@ -2576,6 +2665,7 @@ def _merged_handle_role_changes( ) self.fabric_ops.bulk_update_roles(role_actions) self.fabric_ops.finalize() + self.output.assign(diff=SwitchOutputCollection(items=role_diff_items)) def _merged_handle_idempotent( self, @@ -2812,6 +2902,7 @@ def _handle_deleted_state( f"Proceeding to delete {len(switches_to_delete)} switch(es) from fabric" ) self.fabric_ops.bulk_delete(switches_to_delete) + self.output.assign(diff=SwitchOutputCollection(items=switches_to_delete)) self.log.debug("EXIT: _handle_deleted_state()") # ===================================================================== From b0f3f3480d33ca9d3bba0519b58f27498df0ba23 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 16 Mar 2026 14:56:32 +0530 Subject: [PATCH 014/109] Update Results object API calls from Module + Operation Handling --- plugins/module_utils/nd_switch_resources.py | 59 +++++++++++++-------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 342b7dc1..b9a35a3c 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -497,10 +497,11 @@ def bulk_discover( result = nd.rest_send.result_current results.action = "discover" + results.operation_type = OperationType.QUERY results.response_current = response results.result_current = result results.diff_current = payload - results.register_task_result() + results.register_api_call() # Extract discovered switches from response switches_data = [] @@ -748,10 +749,11 @@ def bulk_add( result = nd.rest_send.result_current results.action = "create" + results.operation_type = OperationType.CREATE results.response_current = response results.result_current = result results.diff_current = payload - 
results.register_task_result() + results.register_api_call() if not result.get("success"): msg = ( @@ -825,10 +827,11 @@ def bulk_delete( result = nd.rest_send.result_current results.action = "delete" + results.operation_type = OperationType.DELETE results.response_current = response results.result_current = result results.diff_current = {"deleted": serial_numbers} - results.register_task_result() + results.register_api_call() log.info(f"Bulk delete submitted for {len(serial_numbers)} switch(es)") log.debug("EXIT: bulk_delete()") @@ -895,13 +898,14 @@ def bulk_save_credentials( result = nd.rest_send.result_current results.action = "save_credentials" + results.operation_type = OperationType.UPDATE results.response_current = response results.result_current = result results.diff_current = { "switchIds": serial_numbers, "username": username, } - results.register_task_result() + results.register_api_call() log.info(f"Credentials saved for {len(serial_numbers)} switch(es)") except Exception as e: msg = ( @@ -958,10 +962,11 @@ def bulk_update_roles( result = nd.rest_send.result_current results.action = "update_role" + results.operation_type = OperationType.UPDATE results.response_current = response results.result_current = result results.diff_current = payload - results.register_task_result() + results.register_api_call() log.info(f"Roles updated for {len(switch_roles)} switch(es)") except Exception as e: msg = ( @@ -1110,12 +1115,13 @@ def handle( if nd.module.check_mode: log.info("Check mode: would run POAP bootstrap / pre-provision") results.action = "poap" + results.operation_type = OperationType.CREATE results.response_current = {"MESSAGE": "check mode — skipped"} results.result_current = {"success": True, "changed": True} results.diff_current = { "poap_switches": [pc.seed_ip for pc in proposed_config] } - results.register_task_result() + results.register_api_call() return # Classify entries @@ -1176,10 +1182,11 @@ def handle( if not bootstrap_entries and not 
preprov_entries and not swap_entries: log.warning("No POAP switch models built — nothing to process") results.action = "poap" + results.operation_type = OperationType.QUERY results.response_current = {"MESSAGE": "no switches to process"} results.result_current = {"success": True, "changed": False} results.diff_current = {} - results.register_task_result() + results.register_api_call() log.debug("EXIT: POAPHandler.handle()") @@ -1480,10 +1487,11 @@ def _import_bootstrap_switches( result = nd.rest_send.result_current results.action = "bootstrap" + results.operation_type = OperationType.CREATE results.response_current = response results.result_current = result results.diff_current = payload - results.register_task_result() + results.register_api_call() if not result.get("success"): msg = ( @@ -1600,10 +1608,11 @@ def _preprovision_switches( result = nd.rest_send.result_current results.action = "preprovision" + results.operation_type = OperationType.CREATE results.response_current = response results.result_current = result results.diff_current = payload - results.register_task_result() + results.register_api_call() if not result.get("success"): msg = ( @@ -1732,13 +1741,14 @@ def _handle_poap_swap( result = nd.rest_send.result_current results.action = "swap_serial" + results.operation_type = OperationType.UPDATE results.response_current = response results.result_current = result results.diff_current = { "old_serial": old_serial, "new_serial": new_serial, } - results.register_task_result() + results.register_api_call() if not result.get("success"): msg = ( @@ -1875,12 +1885,13 @@ def handle( if nd.module.check_mode: log.info("Check mode: would run RMA provision") results.action = "rma" + results.operation_type = OperationType.CREATE results.response_current = {"MESSAGE": "check mode — skipped"} results.result_current = {"success": True, "changed": True} results.diff_current = { "rma_switches": [pc.seed_ip for pc in proposed_config] } - results.register_task_result() + 
results.register_api_call() return # Collect (SwitchConfigModel, RMAConfigModel) pairs @@ -1897,10 +1908,11 @@ def handle( if not rma_entries: log.warning("No RMA entries found — nothing to process") results.action = "rma" + results.operation_type = OperationType.QUERY results.response_current = {"MESSAGE": "no switches to process"} results.result_current = {"success": True, "changed": False} results.diff_current = {} - results.register_task_result() + results.register_api_call() return log.info(f"Found {len(rma_entries)} RMA entry/entries to process") @@ -2146,13 +2158,14 @@ def _provision_rma_switch( result = nd.rest_send.result_current results.action = "rma" + results.operation_type = OperationType.CREATE results.response_current = response results.result_current = result results.diff_current = { "old_switch_id": old_switch_id, "new_switch_id": rma_model.new_switch_id, } - results.register_task_result() + results.register_api_call() if not result.get("success"): msg = ( @@ -2264,10 +2277,14 @@ def exit_json(self) -> None: self.results.build_final_result() final = self.results.final_result - # NDOutput owns serialization of before/after/proposed via their - # overridden to_ansible_config() methods. We only set _changed - # manually because self.existing is not re-queried after mutations - # so the auto-diff in NDOutput.format() would see no change. + # Re-query the fabric to get the actual post-operation inventory so + # that "current" reflects real state rather than the pre-op snapshot. 
+ if True not in self.results.failed and not self.nd.module.check_mode: + self.existing = SwitchOutputCollection.from_api_response( + response_data=self._query_all_switches(), model_class=SwitchDataModel + ) + self.output.assign(after=self.existing) + self.output._changed = bool(final.get("changed", False)) formatted = self.output.format() @@ -2432,7 +2449,7 @@ def _handle_query_state( "success": True, } self.results.diff_current = {} - self.results.register_task_result() + self.results.register_api_call() self.log.debug(f"Returning {len(switch_data)} switches in results") self.log.debug("EXIT: _handle_query_state()") @@ -2497,7 +2514,7 @@ def _handle_merged_state( "to_add": [sw.fabric_management_ip for sw in switches_to_add], "migration_mode": [sw.fabric_management_ip for sw in migration_switches], } - self.results.register_task_result() + self.results.register_api_call() return # Collect (serial_number, SwitchConfigModel) pairs for post-processing @@ -2791,7 +2808,7 @@ def _handle_overridden_state( "to_add": n_add, "migration_mode": n_migrate, } - self.results.register_task_result() + self.results.register_api_call() return switches_to_delete: List[SwitchDataModel] = [] @@ -2895,7 +2912,7 @@ def _handle_deleted_state( self.results.diff_current = { "to_delete": [sw.fabric_management_ip for sw in switches_to_delete], } - self.results.register_task_result() + self.results.register_api_call() return self.log.info( From 47db2b11f71eae9219b3c05383716b5a3d45889a Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 16 Mar 2026 15:37:09 +0530 Subject: [PATCH 015/109] Fix ConfigSync Status Error --- plugins/module_utils/nd_switch_resources.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index b9a35a3c..b9dc274a 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -2657,7 +2657,7 @@ def 
_merged_handle_role_changes( f"Role change not possible for switch " f"{sw.fabric_management_ip} ({sw.switch_id}). " f"configSyncStatus is " - f"'{status.value if status else 'unknown'}', " + f"'{getattr(status, 'value', status) if status else 'unknown'}', " f"expected '{ConfigSyncStatus.NOT_APPLICABLE.value}'." ) ) @@ -2717,7 +2717,7 @@ def _merged_handle_idempotent( self.log.info( f"Switch {sw.fabric_management_ip} ({sw.switch_id}) is " f"config-idempotent but configSyncStatus is " - f"'{status.value if status else 'unknown'}' — " + f"'{getattr(status, 'value', status) if status else 'unknown'}' — " f"will run config save and deploy" ) finalize_needed = True From 28703b54fbdb16eb073cc02c5fa7dceb0bd525c1 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 16 Mar 2026 17:22:43 +0530 Subject: [PATCH 016/109] Add duplicate ip validation in configs, fix api changes in module --- plugins/module_utils/nd_switch_resources.py | 22 +++++++++++++++++++-- plugins/modules/nd_manage_switches.py | 4 ++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index b9dc274a..ba5d3d41 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -221,6 +221,24 @@ def validate_configs( else: raise + # Duplicate seed_ip check + seen_ips: set = set() + duplicate_ips: set = set() + for cfg in validated_configs: + if cfg.seed_ip in seen_ips: + duplicate_ips.add(cfg.seed_ip) + seen_ips.add(cfg.seed_ip) + if duplicate_ips: + error_msg = ( + f"Duplicate seed_ip entries found in config: " + f"{sorted(duplicate_ips)}. Each switch must appear only once." 
+ ) + log.error(error_msg) + if hasattr(nd, 'module'): + nd.module.fail_json(msg=error_msg) + else: + raise ValueError(error_msg) + operation_type = validated_configs[0].operation_type log.info( f"Successfully validated {len(validated_configs)} " @@ -320,10 +338,10 @@ def compute_changes( continue prop_dict = prop_sw.model_dump( - by_alias=True, exclude_none=True, include=compare_fields + by_alias=False, exclude_none=True, include=compare_fields ) existing_dict = existing_sw.model_dump( - by_alias=True, exclude_none=True, include=compare_fields + by_alias=False, exclude_none=True, include=compare_fields ) if prop_dict == existing_dict: diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 559f1bd0..f3d14ca6 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -582,7 +582,7 @@ def main(): } results.diff_current = {} - results.register_task_result() + results.register_api_call() results.build_final_result() # Add error details if debug output is requested @@ -608,7 +608,7 @@ def main(): "found": False, } results.diff_current = {} - results.register_task_result() + results.register_api_call() results.build_final_result() if output_level == "debug": From 2372a741419124281de092deb032005e58d9ce1b Mon Sep 17 00:00:00 2001 From: AKDRG Date: Tue, 17 Mar 2026 00:28:00 +0530 Subject: [PATCH 017/109] RMA, POAP Bootstrap and Diff Fixes --- .../models/nd_manage_switches/__init__.py | 2 - .../nd_manage_switches/bootstrap_models.py | 10 + .../nd_manage_switches/config_models.py | 21 +- .../nd_manage_switches/preprovision_models.py | 29 +- .../models/nd_manage_switches/rma_models.py | 70 +-- plugins/module_utils/nd_switch_resources.py | 548 +++++++++--------- .../nd_manage_switches/switch_wait_utils.py | 93 ++- 7 files changed, 392 insertions(+), 381 deletions(-) diff --git a/plugins/module_utils/models/nd_manage_switches/__init__.py b/plugins/module_utils/models/nd_manage_switches/__init__.py 
index 6ddd6cd8..17415a32 100644 --- a/plugins/module_utils/models/nd_manage_switches/__init__.py +++ b/plugins/module_utils/models/nd_manage_switches/__init__.py @@ -73,7 +73,6 @@ # --- RMA models --- from .rma_models import ( # noqa: F401 - RMASpecificModel, RMASwitchModel, ) @@ -130,7 +129,6 @@ "PreProvisionSwitchesRequestModel", "PreProvisionSwitchModel", # RMA models - "RMASpecificModel", "RMASwitchModel", # Switch actions models "ChangeSwitchSerialNumberRequestModel", diff --git a/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py b/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py index 864a8e25..9825c0c3 100644 --- a/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py @@ -272,6 +272,16 @@ class BootstrapImportSwitchModel(NDBaseModel): default=None, alias="discoveryPassword" ) + remote_credential_store: RemoteCredentialStore = Field( + default=RemoteCredentialStore.LOCAL, + alias="remoteCredentialStore", + description="Type of credential store for discovery credentials" + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key for discovery credentials" + ) data: Optional[Dict[str, Any]] = Field( default=None, description="Bootstrap configuration data block (gatewayIpMask, models)" diff --git a/plugins/module_utils/models/nd_manage_switches/config_models.py b/plugins/module_utils/models/nd_manage_switches/config_models.py index 267598c2..4c22acec 100644 --- a/plugins/module_utils/models/nd_manage_switches/config_models.py +++ b/plugins/module_utils/models/nd_manage_switches/config_models.py @@ -268,15 +268,15 @@ class RMAConfigModel(NDNestedModel): min_length=1, description="Serial number of switch to be replaced by RMA" ) - model: str = Field( - ..., + model: Optional[str] = Field( + default=None, min_length=1, - description="Model of switch to 
Bootstrap for RMA" + description="Model of switch to Bootstrap for RMA. If omitted, sourced from bootstrap API." ) - version: str = Field( - ..., + version: Optional[str] = Field( + default=None, min_length=1, - description="Software version of switch to Bootstrap for RMA" + description="Software version of switch to Bootstrap for RMA. If omitted, sourced from bootstrap API." ) # Optional fields @@ -286,13 +286,14 @@ class RMAConfigModel(NDNestedModel): description="Name of the image policy to be applied on switch during Bootstrap for RMA" ) - # Required config data for RMA (models list + gateway) - config_data: ConfigDataModel = Field( - ..., + # Optional config data for RMA (models list + gateway); sourced from bootstrap API if omitted + config_data: Optional[ConfigDataModel] = Field( + default=None, alias="configData", description=( "Basic config data of switch to Bootstrap for RMA. " - "'models' (list of module models) and 'gateway' (IP with mask) are mandatory." + "'models' (list of module models) and 'gateway' (IP with mask) are mandatory " + "when provided. If omitted, sourced from bootstrap API." 
), ) diff --git a/plugins/module_utils/models/nd_manage_switches/preprovision_models.py b/plugins/module_utils/models/nd_manage_switches/preprovision_models.py index 1cd8b8a0..9e34910a 100644 --- a/plugins/module_utils/models/nd_manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/nd_manage_switches/preprovision_models.py @@ -14,7 +14,7 @@ __metaclass__ = type from ipaddress import ip_network -from pydantic import Field, field_validator, model_validator +from pydantic import Field, computed_field, field_validator from typing import Any, Dict, List, Optional, ClassVar, Literal from typing_extensions import Self @@ -110,14 +110,6 @@ class PreProvisionSwitchModel(NDBaseModel): ) # --- bootstrapCredential fields (optional) --- - use_new_credentials: bool = Field( - default=False, - alias="useNewCredentials", - description=( - "If True, use discoveryUsername and discoveryPassword for local " - "remoteCredentialStore or use remoteCredentialStoreKey for CyberArk" - ), - ) discovery_username: Optional[str] = Field( default=None, alias="discoveryUsername", @@ -128,11 +120,16 @@ class PreProvisionSwitchModel(NDBaseModel): alias="discoveryPassword", description="Password for switch discovery post pre-provision", ) - remote_credential_store: Optional[RemoteCredentialStore] = Field( - default=None, + remote_credential_store: RemoteCredentialStore = Field( + default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore", description="Type of credential store for discovery credentials", ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key for discovery credentials", + ) # --- Validators --- @@ -175,11 +172,11 @@ def validate_gateway(cls, v: str) -> str: raise ValueError(f"Invalid gatewayIpMask: {v}") from exc return v - @model_validator(mode='after') - def derive_use_new_credentials(self) -> Self: - """Auto-set useNewCredentials when both discoveryUsername and 
discoveryPassword are provided.""" - self.use_new_credentials = bool(self.discovery_username and self.discovery_password) - return self + @computed_field(alias="useNewCredentials") + @property + def use_new_credentials(self) -> bool: + """Derive useNewCredentials from discoveryUsername and discoveryPassword.""" + return bool(self.discovery_username and self.discovery_password) def to_payload(self) -> Dict[str, Any]: """Convert to API payload format matching preProvision spec.""" diff --git a/plugins/module_utils/models/nd_manage_switches/rma_models.py b/plugins/module_utils/models/nd_manage_switches/rma_models.py index 1f5be8b5..12f6c891 100644 --- a/plugins/module_utils/models/nd_manage_switches/rma_models.py +++ b/plugins/module_utils/models/nd_manage_switches/rma_models.py @@ -26,71 +26,6 @@ ) from .validators import SwitchValidators - -class RMASpecificModel(NDBaseModel): - """ - Replacement-switch-specific fields used in an RMA bootstrap operation. - """ - identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" - hostname: str = Field( - ..., - description="Hostname of the switch" - ) - ip: str = Field( - ..., - description="IP address of the switch" - ) - new_switch_id: str = Field( - ..., - alias="newSwitchId", - description="SwitchId (serial number) of the switch" - ) - public_key: str = Field( - ..., - alias="publicKey", - description="Public Key" - ) - finger_print: str = Field( - ..., - alias="fingerPrint", - description="Fingerprint" - ) - dhcp_bootstrap_ip: Optional[str] = Field( - default=None, - alias="dhcpBootstrapIp", - description="This is used for device day-0 bring-up when using inband reachability" - ) - seed_switch: bool = Field( - default=False, - alias="seedSwitch", - description="Use as seed switch" - ) - - @field_validator('hostname', mode='before') - @classmethod - def validate_host(cls, v: str) -> str: - result = 
SwitchValidators.validate_hostname(v) - if result is None: - raise ValueError("hostname cannot be empty") - return result - - @field_validator('ip', 'dhcp_bootstrap_ip', mode='before') - @classmethod - def validate_ip(cls, v: Optional[str]) -> Optional[str]: - if v is None: - return None - return SwitchValidators.validate_ip_address(v) - - @field_validator('new_switch_id', mode='before') - @classmethod - def validate_serial(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("new_switch_id cannot be empty") - return result - - class RMASwitchModel(NDBaseModel): """ Request payload for provisioning a replacement (RMA) switch via bootstrap. @@ -183,6 +118,10 @@ class RMASwitchModel(NDBaseModel): default=False, alias="seedSwitch" ) + data: Optional[Dict[str, Any]] = Field( + default=None, + description="Bootstrap configuration data block (gatewayIpMask, models)" + ) @field_validator('gateway_ip_mask', mode='before') @classmethod @@ -253,6 +192,5 @@ def from_response(cls, response: Dict[str, Any]) -> Self: __all__ = [ - "RMASpecificModel", "RMASwitchModel", ] diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index ba5d3d41..e0fa6ef0 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -142,6 +142,7 @@ class SwitchServiceContext: log: logging.Logger save_config: bool = True deploy_config: bool = True + output: Optional[NDOutput] = None # ========================================================================= @@ -298,7 +299,6 @@ def compute_changes( changes: Dict[str, list] = { "to_add": [], "to_update": [], - "role_change": [], "to_delete": [], "migration_mode": [], "idempotent": [], @@ -348,28 +348,16 @@ def compute_changes( log.debug(f"Switch {ip} is idempotent — no changes needed") changes["idempotent"].append(prop_sw) else: - diff_keys = { - k for k in set(prop_dict) | set(existing_dict) - if 
prop_dict.get(k) != existing_dict.get(k) - } - if diff_keys == {"switch_role"}: - log.info( - f"Switch {ip} has role-only difference — marking role_change. " - f"proposed: {prop_dict.get('switch_role')}, " - f"existing: {existing_dict.get('switch_role')}" - ) - changes["role_change"].append(prop_sw) - else: - log.info( - f"Switch {ip} has differences — marking to_update. " - f"Changed fields: {diff_keys}" - ) - log.debug( - f"Switch {ip} diff detail — " - f"proposed: { {k: prop_dict.get(k) for k in diff_keys} }, " - f"existing: { {k: existing_dict.get(k) for k in diff_keys} }" - ) - changes["to_update"].append(prop_sw) + log.info( + f"Switch {ip} has differences — marking to_update. " + f"Changed fields: {diff_keys}" + ) + log.debug( + f"Switch {ip} diff detail — " + f"proposed: { {k: prop_dict.get(k) for k in diff_keys} }, " + f"existing: { {k: existing_dict.get(k) for k in diff_keys} }" + ) + changes["to_update"].append(prop_sw) # Switches in existing but not in proposed (for overridden state) proposed_ids = {sw.switch_id for sw in proposed} @@ -385,7 +373,6 @@ def compute_changes( f"Compute changes summary: " f"to_add={len(changes['to_add'])}, " f"to_update={len(changes['to_update'])}, " - f"role_change={len(changes['role_change'])}, " f"to_delete={len(changes['to_delete'])}, " f"migration_mode={len(changes['migration_mode'])}, " f"idempotent={len(changes['idempotent'])}" @@ -393,6 +380,106 @@ def compute_changes( log.debug("EXIT: compute_changes()") return changes + @staticmethod + def validate_switch_api_fields( + nd: NDModule, + serial: str, + model: Optional[str], + version: Optional[str], + config_data, + bootstrap_data: Dict[str, Any], + log: logging.Logger, + context: str, + hostname: Optional[str] = None, + ) -> None: + """Validate user-supplied switch fields against the bootstrap API response. + + Only fields that are provided (non-None) are validated against the API. 
+ Fields that are omitted are silently filled in from the API at build + time — no error is raised for those. Any omitted fields are logged at + INFO level so the operator can see what was sourced from the API. + + Args: + nd: ND module wrapper used for failure handling. + serial: Serial number of the switch being processed. + model: User-provided switch model, or None if omitted. + version: User-provided software version, or None if omitted. + config_data: User-provided ``ConfigDataModel``, or None if omitted. + bootstrap_data: Matching entry from the bootstrap GET API. + log: Logger instance. + context: Label used in error messages (e.g. ``"Bootstrap"`` or ``"RMA"``). + hostname: User-provided hostname, or None if omitted (bootstrap only). + + Returns: + None. + """ + bs_data = bootstrap_data.get("data") or {} + mismatches: List[str] = [] + + if model is not None and model != bootstrap_data.get("model"): + mismatches.append( + f"model: provided '{model}', " + f"bootstrap reports '{bootstrap_data.get('model')}'" + ) + + if version is not None and version != bootstrap_data.get("softwareVersion"): + mismatches.append( + f"version: provided '{version}', " + f"bootstrap reports '{bootstrap_data.get('softwareVersion')}'" + ) + + if config_data is not None: + bs_gateway = ( + bootstrap_data.get("gatewayIpMask") + or bs_data.get("gatewayIpMask") + ) + if config_data.gateway is not None and config_data.gateway != bs_gateway: + mismatches.append( + f"config_data.gateway: provided '{config_data.gateway}', " + f"bootstrap reports '{bs_gateway}'" + ) + + bs_models = bs_data.get("models", []) + if ( + config_data.models + and sorted(config_data.models) != sorted(bs_models) + ): + mismatches.append( + f"config_data.models: provided {config_data.models}, " + f"bootstrap reports {bs_models}" + ) + + if mismatches: + nd.module.fail_json( + msg=( + f"{context} field mismatch for serial '{serial}'. 
" + f"The following provided values do not match the " + f"bootstrap API data:\n" + + "\n".join(f" - {m}" for m in mismatches) + ) + ) + + # Log any fields that were omitted and will be sourced from the API + pulled: List[str] = [] + if model is None: + pulled.append("model") + if version is None: + pulled.append("version") + if hostname is None: + pulled.append("hostname") + if config_data is None: + pulled.append("config_data (gateway + models)") + if pulled: + log.info( + f"{context} serial '{serial}': the following fields were not " + f"provided and will be sourced from the bootstrap API: " + f"{', '.join(pulled)}" + ) + else: + log.debug( + f"{context} field validation passed for serial '{serial}'" + ) + # ========================================================================= # Switch Discovery Service @@ -1196,6 +1283,20 @@ def handle( if preprov_models: self._preprovision_switches(preprov_models) + if self.ctx.output: + diff_items = [ + _DiffRecord({ + "serial_number": m.serial_number, + "hostname": m.hostname, + "ip": m.ip, + "model": m.model, + "software_version": m.software_version, + "role": m.switch_role, + }) + for m in preprov_models + ] + self.ctx.output.assign(diff=SwitchOutputCollection(items=diff_items)) + # Edge case: nothing actionable if not bootstrap_entries and not preprov_entries and not swap_entries: log.warning("No POAP switch models built — nothing to process") @@ -1251,7 +1352,17 @@ def _handle_poap_bootstrap( # Validate user-supplied fields against bootstrap data (if provided) # and warn about any fields that will be pulled from the API. 
- self._validate_bootstrap_fields(poap_cfg, bootstrap_data, log) + SwitchDiffEngine.validate_switch_api_fields( + nd=nd, + serial=poap_cfg.serial_number, + model=poap_cfg.model, + version=poap_cfg.version, + config_data=poap_cfg.config_data, + bootstrap_data=bootstrap_data, + log=log, + context="Bootstrap", + hostname=poap_cfg.hostname, + ) model = self._build_bootstrap_import_model( switch_cfg, poap_cfg, bootstrap_data @@ -1281,91 +1392,22 @@ def _handle_poap_bootstrap( skip_greenfield_check=True, ) - log.debug("EXIT: _handle_poap_bootstrap()") + if self.ctx.output: + import_by_serial = {m.serial_number: m for m in import_models} + diff_items = [ + _DiffRecord({ + "seed_ip": switch_cfg.seed_ip, + "serial_number": serial, + "hostname": import_by_serial[serial].hostname if serial in import_by_serial else None, + "model": import_by_serial[serial].model if serial in import_by_serial else None, + "software_version": import_by_serial[serial].version if serial in import_by_serial else None, + "role": switch_cfg.role, + }) + for serial, switch_cfg in switch_actions + ] + self.ctx.output.assign(diff=SwitchOutputCollection(items=diff_items)) - def _validate_bootstrap_fields( - self, - poap_cfg: POAPConfigModel, - bootstrap_data: Dict[str, Any], - log: logging.Logger, - ) -> None: - """Validate user-supplied bootstrap fields against the bootstrap API response. - - If a field is provided in the playbook config, it must match what the - bootstrap API reports. Fields that are omitted are silently filled in - from the API at import time — no error is raised for those. - - Args: - poap_cfg: POAP config entry from the playbook. - bootstrap_data: Matching entry from the bootstrap GET API. - log: Logger instance. - - Returns: - None. 
- """ - serial = poap_cfg.serial_number - bs_data = bootstrap_data.get("data") or {} - mismatches: List[str] = [] - - if poap_cfg.model and poap_cfg.model != bootstrap_data.get("model"): - mismatches.append( - f"model: provided '{poap_cfg.model}', " - f"bootstrap reports '{bootstrap_data.get('model')}'" - ) - - if poap_cfg.version and poap_cfg.version != bootstrap_data.get("softwareVersion"): - mismatches.append( - f"version: provided '{poap_cfg.version}', " - f"bootstrap reports '{bootstrap_data.get('softwareVersion')}'" - ) - - if poap_cfg.config_data: - bs_gateway = ( - bootstrap_data.get("gatewayIpMask") - or bs_data.get("gatewayIpMask") - ) - if poap_cfg.config_data.gateway and poap_cfg.config_data.gateway != bs_gateway: - mismatches.append( - f"config_data.gateway: provided '{poap_cfg.config_data.gateway}', " - f"bootstrap reports '{bs_gateway}'" - ) - - bs_models = bs_data.get("models", []) - if ( - poap_cfg.config_data.models - and sorted(poap_cfg.config_data.models) != sorted(bs_models) - ): - mismatches.append( - f"config_data.models: provided {poap_cfg.config_data.models}, " - f"bootstrap reports {bs_models}" - ) - - if mismatches: - self.ctx.nd.module.fail_json( - msg=( - f"Bootstrap field mismatch for serial '{serial}'. 
" - f"The following provided values do not match the " - f"bootstrap API data:\n" - + "\n".join(f" - {m}" for m in mismatches) - ) - ) - - # Log which fields will be sourced from the bootstrap API - pulled: List[str] = [] - if not poap_cfg.model: - pulled.append("model") - if not poap_cfg.version: - pulled.append("version") - if not poap_cfg.hostname: - pulled.append("hostname") - if not poap_cfg.config_data: - pulled.append("config_data (gateway + models)") - if pulled: - log.info( - f"Bootstrap serial '{serial}': the following fields were not " - f"provided and will be sourced from the bootstrap API: " - f"{', '.join(pulled)}" - ) + log.debug("EXIT: _handle_poap_bootstrap()") def _build_bootstrap_import_model( self, @@ -1948,6 +1990,7 @@ def handle( # Build and submit each RMA request switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + rma_diff_data: List[Tuple[str, str, SwitchConfigModel]] = [] # (new_serial, old_serial, switch_cfg) for switch_cfg, rma_cfg in rma_entries: new_serial = rma_cfg.serial_number bootstrap_data = bootstrap_idx.get(new_serial) @@ -1962,6 +2005,17 @@ def handle( log.error(msg) nd.module.fail_json(msg=msg) + SwitchDiffEngine.validate_switch_api_fields( + nd=nd, + serial=rma_cfg.serial_number, + model=rma_cfg.model, + version=rma_cfg.version, + config_data=rma_cfg.config_data, + bootstrap_data=bootstrap_data, + log=log, + context="RMA", + ) + rma_model = self._build_rma_model( switch_cfg, rma_cfg, bootstrap_data, old_switch_info[rma_cfg.old_serial], @@ -1973,14 +2027,53 @@ def handle( self._provision_rma_switch(rma_cfg.old_serial, rma_model) switch_actions.append((rma_model.new_switch_id, switch_cfg)) - - # Post-processing: wait, save credentials, finalize - self.fabric_ops.post_add_processing( - switch_actions, - wait_utils=self.wait_utils, - context="RMA", - skip_greenfield_check=True, + rma_diff_data.append((rma_model.new_switch_id, rma_cfg.old_serial, switch_cfg)) + + # Post-processing: wait for RMA switches to become ready, 
then + # save credentials and finalize. RMA switches come up via POAP + # bootstrap and never enter migration mode, so we use the + # RMA-specific wait (unreachable → ok) instead of the generic + # wait_for_switch_manageable which would time out on the + # migration-mode phase. + all_new_serials = [sn for sn, _ in switch_actions] + log.info( + f"Waiting for {len(all_new_serials)} RMA replacement " + f"switch(es) to become ready: {all_new_serials}" ) + success = self.wait_utils.wait_for_rma_switch_ready(all_new_serials) + if not success: + msg = ( + f"One or more RMA replacement switches failed to become " + f"discoverable in fabric '{self.ctx.fabric}'. " + f"Switches: {all_new_serials}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + if self.ctx.output: + diff_items = [ + _DiffRecord({ + "seed_ip": switch_cfg.seed_ip, + "old_serial_number": old_serial, + "new_serial_number": new_serial, + "hostname": old_switch_info[old_serial]["hostname"], + "role": switch_cfg.role, + }) + for new_serial, old_serial, switch_cfg in rma_diff_data + ] + self.ctx.output.assign(diff=SwitchOutputCollection(items=diff_items)) + + self.fabric_ops.bulk_save_credentials(switch_actions) + + try: + self.fabric_ops.finalize() + except Exception as e: + msg = ( + f"Failed to finalize (config-save/deploy) for RMA " + f"switches {all_new_serials}: {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) log.debug("EXIT: RMAHandler.handle()") @@ -2022,6 +2115,20 @@ def _validate_prerequisites( ) ) + # Verify the seed_ip in config matches the IP of the switch + # identified by old_serial in the fabric inventory. + seed_ip = switch_cfg.seed_ip + inventory_ip = old_switch.fabric_management_ip + if seed_ip != inventory_ip: + nd.module.fail_json( + msg=( + f"RMA: seed_ip '{seed_ip}' does not match the " + f"fabric management IP '{inventory_ip}' of switch " + f"with serial '{old_serial}'. Verify that seed_ip " + f"and old_serial refer to the same switch." 
+ ) + ) + ad = old_switch.additional_data if ad is None: nd.module.fail_json( @@ -2036,7 +2143,7 @@ def _validate_prerequisites( nd.module.fail_json( msg=( f"RMA: Switch '{old_serial}' has discovery status " - f"'{ad.discovery_status.value if ad.discovery_status else 'unknown'}', " + f"'{getattr(ad.discovery_status, 'value', ad.discovery_status) if ad.discovery_status else 'unknown'}', " f"expected 'unreachable'. The old switch must be " f"unreachable before RMA can proceed." ) @@ -2046,7 +2153,7 @@ def _validate_prerequisites( nd.module.fail_json( msg=( f"RMA: Switch '{old_serial}' is in " - f"'{ad.system_mode.value if ad.system_mode else 'unknown'}' " + f"'{getattr(ad.system_mode, 'value', ad.system_mode) if ad.system_mode else 'unknown'}' " f"mode, expected 'maintenance'. Put the switch in " f"maintenance mode before initiating RMA." ) @@ -2093,10 +2200,7 @@ def _build_rma_model( new_switch_id = rma_cfg.serial_number hostname = old_switch_info.get("hostname", "") ip = switch_cfg.seed_ip - model_name = rma_cfg.model - version = rma_cfg.version image_policy = rma_cfg.image_policy - gateway_ip_mask = rma_cfg.config_data.gateway switch_role = switch_cfg.role password = switch_cfg.password auth_proto = SnmpV3AuthProtocol.MD5 # RMA always uses MD5 @@ -2109,6 +2213,20 @@ def _build_rma_model( finger_print = bootstrap_data.get( "fingerPrint", bootstrap_data.get("fingerprint", "") ) + bs_data = bootstrap_data.get("data") or {} + + # Use user-provided values when available; fall back to bootstrap API data. 
+ model_name = rma_cfg.model or bootstrap_data.get("model", "") + version = rma_cfg.version or bootstrap_data.get("softwareVersion", "") + gateway_ip_mask = ( + (rma_cfg.config_data.gateway if rma_cfg.config_data else None) + or bootstrap_data.get("gatewayIpMask") + or bs_data.get("gatewayIpMask") + ) + data_models = ( + (rma_cfg.config_data.models if rma_cfg.config_data else None) + or bs_data.get("models", []) + ) rma_model = RMASwitchModel( gatewayIpMask=gateway_ip_mask, @@ -2125,6 +2243,7 @@ def _build_rma_model( newSwitchId=new_switch_id, publicKey=public_key, fingerPrint=finger_print, + data={"gatewayIpMask": gateway_ip_mask, "models": data_models} if (gateway_ip_mask or data_models) else None, ) log.debug( @@ -2154,7 +2273,7 @@ def _provision_rma_switch( endpoint = EpManageFabricSwitchProvisionRMAPost() endpoint.fabric_name = self.ctx.fabric - endpoint.switch_id = old_switch_id + endpoint.switch_sn = old_switch_id payload = rma_model.to_payload() @@ -2241,8 +2360,8 @@ def __init__( results=results, fabric=self.fabric, log=log, - save_config=self.module.params.get("save", True), - deploy_config=self.module.params.get("deploy", True), + save_config=self.module.params.get("save"), + deploy_config=self.module.params.get("deploy"), ) # Switch collections @@ -2268,6 +2387,7 @@ def __init__( # overridden to_ansible_config() methods. 
self.output = NDOutput(output_level=self.module.params.get("output_level", "normal")) self.output.assign(before=self.previous, after=self.existing) + self.ctx.output = self.output # Utility instances (SwitchWaitUtils / FabricUtils depend on self) self.fabric_utils = FabricUtils(self.nd, self.fabric, log) @@ -2340,8 +2460,8 @@ def manage_state(self) -> None: """ self.log.info(f"Managing state: {self.state}") - # query / deleted — config is optional - if self.state in ("query", "deleted"): + # deleted — config is optional + if self.state == "deleted": proposed_config = ( SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) if self.config @@ -2353,9 +2473,7 @@ def manage_state(self) -> None: model_class=SwitchConfigModel, items=proposed_config ) ) - if self.state == "deleted": - return self._handle_deleted_state(proposed_config) - return self._handle_query_state(proposed_config) + return self._handle_deleted_state(proposed_config) # merged / overridden — config is required if not self.config: @@ -2403,75 +2521,6 @@ def manage_state(self) -> None: # State Handlers (orchestration only — delegate to services) # ===================================================================== - def _handle_query_state( - self, - proposed_config: Optional[List[SwitchConfigModel]] = None, - ) -> None: - """Return inventory switches matching the optional proposed config. - - Args: - proposed_config: Optional filter config list for matching switches. - - Returns: - None. 
- """ - self.log.debug("ENTER: _handle_query_state()") - self.log.info("Handling query state") - self.log.debug(f"Found {len(self.existing)} existing switches") - - if proposed_config is None: - matched_switches = list(self.existing) - self.log.info("No proposed config — returning all existing switches") - else: - matched_switches: List[SwitchDataModel] = [] - for cfg in proposed_config: - match = next( - ( - sw for sw in self.existing - if sw.fabric_management_ip == cfg.seed_ip - ), - None, - ) - if match is None: - self.log.info(f"Switch {cfg.seed_ip} not found in fabric") - continue - - if cfg.role is not None and match.switch_role != cfg.role: - self.log.info( - f"Switch {cfg.seed_ip} found but role mismatch: " - f"expected {cfg.role.value}, got " - f"{match.switch_role.value if match.switch_role else 'None'}" - ) - continue - - matched_switches.append(match) - - self.log.info( - f"Matched {len(matched_switches)}/{len(proposed_config)} " - f"switch(es) from proposed config" - ) - - switch_data = [sw.model_dump(by_alias=True) for sw in matched_switches] - - self.results.action = "query" - self.results.state = self.state - self.results.check_mode = self.nd.module.check_mode - self.results.operation_type = OperationType.QUERY - self.results.response_current = { - "RETURN_CODE": 200, - "MESSAGE": "OK", - "DATA": switch_data, - } - self.results.result_current = { - "found": len(matched_switches) > 0, - "success": True, - } - self.results.diff_current = {} - self.results.register_api_call() - - self.log.debug(f"Returning {len(switch_data)} switches in results") - self.log.debug("EXIT: _handle_query_state()") - def _handle_merged_state( self, diff: Dict[str, List[SwitchDataModel]], @@ -2501,19 +2550,16 @@ def _handle_merged_state( config_by_ip = {sw.seed_ip: sw for sw in proposed_config} existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} - # Phase 1: Handle role-change switches - self._merged_handle_role_changes(diff, config_by_ip, existing_by_ip) 
- - # Phase 2: Handle idempotent switches that may need config sync - self._merged_handle_idempotent(diff, existing_by_ip) + # Phase 1: Handle idempotent switches that may need config sync + idempotent_save_req = self._merged_handle_idempotent(diff, existing_by_ip) - # Phase 3: Fail on to_update (merged state doesn't support updates) + # Phase 2: Fail on to_update (merged state doesn't support updates) self._merged_handle_to_update(diff) switches_to_add = diff.get("to_add", []) migration_switches = diff.get("migration_mode", []) - if not switches_to_add and not migration_switches: + if not switches_to_add and not migration_switches and not idempotent_save_req: self.log.info("No switches need adding or migration processing") return @@ -2596,6 +2642,11 @@ def _handle_merged_state( # Phase 5: Collect migration switches for post-processing # Migration mode switches get role updates during post-add processing. + + have_migration_switches = False + if migration_switches: + have_migration_switches = True + for mig_sw in migration_switches: cfg = config_by_ip.get(mig_sw.fabric_management_ip) if cfg and mig_sw.switch_id: @@ -2628,7 +2679,7 @@ def _handle_merged_state( wait_utils=self.wait_utils, context="merged", all_preserve_config=all_preserve_config, - update_roles=True, + update_roles=have_migration_switches, ) self.output.assign(diff=SwitchOutputCollection(items=diff_items)) @@ -2638,75 +2689,11 @@ def _handle_merged_state( # Merged-state sub-handlers (modular phases) # ----------------------------------------------------------------- - def _merged_handle_role_changes( - self, - diff: Dict[str, List[SwitchDataModel]], - config_by_ip: Dict[str, SwitchConfigModel], - existing_by_ip: Dict[str, SwitchDataModel], - ) -> None: - """Handle role-change switches in merged state. - - Role changes are only allowed when configSyncStatus is notApplicable. - Any other status fails the module. - - Args: - diff: Categorized switch diff output. - config_by_ip: Config lookup by seed IP. 
- existing_by_ip: Existing switch lookup by management IP. - - Returns: - None. - """ - role_change_switches = diff.get("role_change", []) - if not role_change_switches: - return - - # Validate configSyncStatus for every role-change switch - for sw in role_change_switches: - existing_sw = existing_by_ip.get(sw.fabric_management_ip) - status = ( - existing_sw.additional_data.config_sync_status - if existing_sw and existing_sw.additional_data - else None - ) - if status != ConfigSyncStatus.NOT_APPLICABLE: - self.nd.module.fail_json( - msg=( - f"Role change not possible for switch " - f"{sw.fabric_management_ip} ({sw.switch_id}). " - f"configSyncStatus is " - f"'{getattr(status, 'value', status) if status else 'unknown'}', " - f"expected '{ConfigSyncStatus.NOT_APPLICABLE.value}'." - ) - ) - - # Build (switch_id, SwitchConfigModel) pairs and apply role change - role_actions: List[Tuple[str, SwitchConfigModel]] = [] - role_diff_items: List = [] - for sw in role_change_switches: - cfg = config_by_ip.get(sw.fabric_management_ip) - if cfg and sw.switch_id: - role_actions.append((sw.switch_id, cfg)) - # Use existing SwitchDataModel for software_version + mode; - # override role with the desired value from the playbook. - record = sw.to_config_dict() - if cfg.role is not None: - record["role"] = cfg.role - role_diff_items.append(_DiffRecord(record)) - - if role_actions: - self.log.info( - f"Performing role change for {len(role_actions)} switch(es)" - ) - self.fabric_ops.bulk_update_roles(role_actions) - self.fabric_ops.finalize() - self.output.assign(diff=SwitchOutputCollection(items=role_diff_items)) - def _merged_handle_idempotent( self, diff: Dict[str, List[SwitchDataModel]], existing_by_ip: Dict[str, SwitchDataModel], - ) -> None: + ) -> bool: """Handle idempotent switches that may need config save and deploy. 
If configSyncStatus is anything other than inSync, run config save @@ -2717,13 +2704,12 @@ def _merged_handle_idempotent( existing_by_ip: Existing switch lookup by management IP. Returns: - None. + bool: True if any idempotent switches require config save and deploy, False otherwise. """ idempotent_switches = diff.get("idempotent", []) if not idempotent_switches: - return + return False - finalize_needed = False for sw in idempotent_switches: existing_sw = existing_by_ip.get(sw.fabric_management_ip) status = ( @@ -2738,15 +2724,9 @@ def _merged_handle_idempotent( f"'{getattr(status, 'value', status) if status else 'unknown'}' — " f"will run config save and deploy" ) - finalize_needed = True - else: - self.log.info( - f"Switch {sw.fabric_management_ip} ({sw.switch_id}) " - f"is idempotent — no changes needed" - ) + return True - if finalize_needed: - self.fabric_ops.finalize() + return False def _merged_handle_to_update( self, @@ -2799,10 +2779,6 @@ def _handle_overridden_state( self.log.warning("No configurations provided for overridden state") return - # Merge role_change into to_update — overridden uses delete-and-re-add - diff["to_update"].extend(diff.get("role_change", [])) - diff["role_change"] = [] - # Check mode — preview only if self.nd.module.check_mode: n_delete = len(diff.get("to_delete", [])) diff --git a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py b/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py index bb81a673..dda4c712 100644 --- a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py +++ b/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py @@ -36,7 +36,7 @@ class SwitchWaitUtils: # Default wait parameters DEFAULT_MAX_ATTEMPTS: int = 300 - DEFAULT_WAIT_INTERVAL: int = 5 # seconds + DEFAULT_WAIT_INTERVAL: int = 10 # seconds # Status values indicating the switch is ready MANAGEABLE_STATUSES = frozenset({"ok", "manageable"}) @@ -178,6 +178,39 @@ def wait_for_switch_manageable( 
serial_numbers, "ok" ) + def wait_for_rma_switch_ready( + self, + serial_numbers: List[str], + ) -> bool: + """Wait for RMA replacement switches to become manageable. + + RMA replacement switches come up via POAP bootstrap and never enter + migration mode. Three phases are run in order: + + 1. Wait for each new serial to appear in the fabric inventory. + The controller registers the switch after ``provisionRMA`` + completes, but it may take a few polling cycles. + 2. Wait for discovery status ``ok``. + + Args: + serial_numbers: New (replacement) switch serial numbers to monitor. + + Returns: + ``True`` if all switches reach ``ok`` status, ``False`` on timeout. + """ + self.log.info( + f"Waiting for RMA replacement switch(es) to become ready " + f"(skipping migration-mode phase): {serial_numbers}" + ) + + # Phase 1: wait until all new serials appear in the fabric inventory. + # Rediscovery triggers will 400 until the switch is registered. + if not self._wait_for_switches_in_fabric(serial_numbers): + return False + + # Phase 2: wait for ok discovery status. + return self._wait_for_discovery_state(serial_numbers, "ok") + def wait_for_discovery( self, seed_ip: str, @@ -472,6 +505,64 @@ def _wait_for_discovery_state( # API Helpers # ===================================================================== + def _wait_for_switches_in_fabric( + self, + serial_numbers: List[str], + ) -> bool: + """Poll until all serial numbers appear in the fabric inventory. + + After ``provisionRMA`` the controller registers the new switch + asynchronously. Rediscovery requests will fail with 400 + "Switch not found" until the switch is registered, so we must + wait for it to appear before triggering any rediscovery. + + Args: + serial_numbers: Switch serial numbers to wait for. + + Returns: + ``True`` when all serials are present, ``False`` on timeout. 
+ """ + pending = list(serial_numbers) + self.log.info( + f"Waiting for {len(pending)} switch(es) to appear in " + f"fabric inventory: {pending}" + ) + + for attempt in range(1, self.max_attempts + 1): + if not pending: + return True + + switch_data = self._fetch_switch_data() + if switch_data is None: + # API error — keep waiting + time.sleep(self.wait_interval) + continue + + known_serials = { + sw.get("serialNumber") for sw in switch_data + } + pending = [ + sn for sn in pending if sn not in known_serials + ] + + if not pending: + self.log.info( + f"All RMA switch(es) now visible in fabric inventory " + f"(attempt {attempt})" + ) + return True + + self.log.debug( + f"Attempt {attempt}/{self.max_attempts}: " + f"{len(pending)} switch(es) not yet in fabric: {pending}" + ) + time.sleep(self.wait_interval) + + self.log.warning( + f"Timeout waiting for switches to appear in fabric: {pending}" + ) + return False + def _fetch_switch_data( self, ) -> Optional[List[Dict[str, Any]]]: From 3bfc072605f57fc961b933f0c6e3aa07def07efd Mon Sep 17 00:00:00 2001 From: AKDRG Date: Wed, 18 Mar 2026 16:00:37 +0530 Subject: [PATCH 018/109] Fix Module and Models Parameters, Imports, Docstrings. 
Add Idempotence Handling for POAP, Preprovision Skip Discovery for Existing Switches --- .../module_utils/endpoints/query_params.py | 10 +- .../manage/nd_manage_switches/credentials.py | 2 +- .../nd_manage_switches/fabric_bootstrap.py | 2 +- .../nd_manage_switches/fabric_config.py | 8 +- .../nd_manage_switches/fabric_discovery.py | 2 +- .../fabric_switch_actions.py | 14 +- .../nd_manage_switches/fabric_switches.py | 4 +- .../models/nd_manage_switches/__init__.py | 141 ------------------ .../nd_manage_switches/bootstrap_models.py | 34 ++--- .../nd_manage_switches/config_models.py | 7 +- .../nd_manage_switches/discovery_models.py | 19 +-- .../models/nd_manage_switches/enums.py | 42 +++++- .../nd_manage_switches/preprovision_models.py | 8 +- .../models/nd_manage_switches/rma_models.py | 8 +- .../switch_actions_models.py | 6 +- .../nd_manage_switches/switch_data_models.py | 6 +- .../models/nd_manage_switches/validators.py | 2 +- plugins/module_utils/nd_switch_resources.py | 62 +++++++- plugins/modules/nd_manage_switches.py | 54 ++----- 19 files changed, 174 insertions(+), 257 deletions(-) delete mode 100644 plugins/module_utils/models/nd_manage_switches/__init__.py diff --git a/plugins/module_utils/endpoints/query_params.py b/plugins/module_utils/endpoints/query_params.py index 5bf8ff08..0d2c112e 100644 --- a/plugins/module_utils/endpoints/query_params.py +++ b/plugins/module_utils/endpoints/query_params.py @@ -211,8 +211,14 @@ def to_query_string(self, url_encode: bool = True) -> str: params = [] for field_name, field_value in self.model_dump(exclude_none=True).items(): if field_value is not None: - # URL-encode the value if requested - encoded_value = quote(str(field_value), safe="") if url_encode else str(field_value) + # URL-encode the value if requested. + # Lucene filter expressions require ':' and ' ' to remain unencoded + # so the server-side parser can recognise the field:value syntax. 
+ if url_encode: + safe_chars = ": " if field_name == "filter" else "" + encoded_value = quote(str(field_value), safe=safe_chars) + else: + encoded_value = str(field_value) params.append(f"{field_name}={encoded_value}") return "&".join(params) diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py index 9ca94d09..ae40a17a 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py @@ -118,7 +118,7 @@ class EpManageCredentialsSwitchesPost(_EpManageCredentialsSwitchesBase): """ class_name: Literal["EpManageCredentialsSwitchesPost"] = Field( - default="EpManageCredentialsSwitchesPost", description="Class name for backward compatibility" + default="EpManageCredentialsSwitchesPost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: CredentialsSwitchesEndpointParams = Field( default_factory=CredentialsSwitchesEndpointParams, description="Endpoint-specific query parameters" diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py index 25432637..5bef2ff5 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py @@ -127,7 +127,7 @@ class EpManageFabricBootstrapGet(_EpManageFabricBootstrapBase): """ class_name: Literal["EpManageFabricBootstrapGet"] = Field( - default="EpManageFabricBootstrapGet", description="Class name for backward compatibility" + default="EpManageFabricBootstrapGet", frozen=True, description="Class name for backward compatibility" ) endpoint_params: FabricBootstrapEndpointParams = Field( default_factory=FabricBootstrapEndpointParams, description="Endpoint-specific query 
parameters" diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py index 5ab75028..b8a5d906 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py @@ -112,7 +112,7 @@ class EpManageFabricConfigSavePost(_EpManageFabricConfigBase): """ class_name: Literal["EpManageFabricConfigSavePost"] = Field( - default="EpManageFabricConfigSavePost", description="Class name for backward compatibility" + default="EpManageFabricConfigSavePost", frozen=True, description="Class name for backward compatibility" ) @property @@ -170,7 +170,7 @@ class EpManageFabricConfigDeployPost(_EpManageFabricConfigBase): """ class_name: Literal["EpManageFabricConfigDeployPost"] = Field( - default="EpManageFabricConfigDeployPost", description="Class name for backward compatibility" + default="EpManageFabricConfigDeployPost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: FabricConfigDeployEndpointParams = Field( default_factory=FabricConfigDeployEndpointParams, description="Endpoint-specific query parameters" @@ -228,7 +228,7 @@ class EpManageFabricGet(_EpManageFabricConfigBase): """ class_name: Literal["EpManageFabricGet"] = Field( - default="EpManageFabricGet", description="Class name for backward compatibility" + default="EpManageFabricGet", frozen=True, description="Class name for backward compatibility" ) @property @@ -271,7 +271,7 @@ class EpManageFabricInventoryDiscoverGet(_EpManageFabricConfigBase): """ class_name: Literal["EpManageFabricInventoryDiscoverGet"] = Field( - default="EpManageFabricInventoryDiscoverGet", description="Class name for backward compatibility" + default="EpManageFabricInventoryDiscoverGet", frozen=True, description="Class name for backward compatibility" ) @property diff --git 
a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py index e2416f98..8bfc45d9 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py @@ -82,7 +82,7 @@ class EpManageFabricShallowDiscoveryPost(_EpManageFabricDiscoveryBase): """ class_name: Literal["EpManageFabricShallowDiscoveryPost"] = Field( - default="EpManageFabricShallowDiscoveryPost", description="Class name for backward compatibility" + default="EpManageFabricShallowDiscoveryPost", frozen=True, description="Class name for backward compatibility" ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py index 40ea5808..5a455091 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py @@ -183,7 +183,7 @@ class EpManageFabricSwitchActionsRemovePost(_EpManageFabricSwitchActionsBase): """ class_name: Literal["EpManageFabricSwitchActionsRemovePost"] = Field( - default="EpManageFabricSwitchActionsRemovePost", description="Class name for backward compatibility" + default="EpManageFabricSwitchActionsRemovePost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: SwitchActionsRemoveEndpointParams = Field( default_factory=SwitchActionsRemoveEndpointParams, description="Endpoint-specific query parameters" @@ -255,7 +255,7 @@ class EpManageFabricSwitchActionsChangeRolesPost(_EpManageFabricSwitchActionsBas """ class_name: Literal["EpManageFabricSwitchActionsChangeRolesPost"] = Field( - default="EpManageFabricSwitchActionsChangeRolesPost", + default="EpManageFabricSwitchActionsChangeRolesPost", 
frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( @@ -330,7 +330,7 @@ class EpManageFabricSwitchActionsImportBootstrapPost(_EpManageFabricSwitchAction """ class_name: Literal["EpManageFabricSwitchActionsImportBootstrapPost"] = Field( - default="EpManageFabricSwitchActionsImportBootstrapPost", description="Class name for backward compatibility" + default="EpManageFabricSwitchActionsImportBootstrapPost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: SwitchActionsImportEndpointParams = Field( default_factory=SwitchActionsImportEndpointParams, description="Endpoint-specific query parameters" @@ -412,7 +412,7 @@ class EpManageFabricSwitchActionsPreProvisionPost(_EpManageFabricSwitchActionsBa """ class_name: Literal["EpManageFabricSwitchActionsPreProvisionPost"] = Field( - default="EpManageFabricSwitchActionsPreProvisionPost", + default="EpManageFabricSwitchActionsPreProvisionPost", frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsImportEndpointParams = Field( @@ -510,7 +510,7 @@ class EpManageFabricSwitchProvisionRMAPost(_EpManageFabricSwitchActionsPerSwitch """ class_name: Literal["EpManageFabricSwitchProvisionRMAPost"] = Field( - default="EpManageFabricSwitchProvisionRMAPost", description="Class name for backward compatibility" + default="EpManageFabricSwitchProvisionRMAPost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: SwitchActionsTicketEndpointParams = Field( default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" @@ -609,7 +609,7 @@ class EpManageFabricSwitchChangeSerialNumberPost(_EpManageFabricSwitchActionsPer """ class_name: Literal["EpManageFabricSwitchChangeSerialNumberPost"] = Field( - default="EpManageFabricSwitchChangeSerialNumberPost", description="Class name for backward compatibility" + 
default="EpManageFabricSwitchChangeSerialNumberPost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: SwitchActionsClusterEndpointParams = Field( default_factory=SwitchActionsClusterEndpointParams, description="Endpoint-specific query parameters" @@ -686,7 +686,7 @@ class EpManageFabricSwitchActionsRediscoverPost(_EpManageFabricSwitchActionsBase """ class_name: Literal["EpManageFabricSwitchActionsRediscoverPost"] = Field( - default="EpManageFabricSwitchActionsRediscoverPost", + default="EpManageFabricSwitchActionsRediscoverPost", frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py index 2334e98c..a1498cb6 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py +++ b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py @@ -155,7 +155,7 @@ class EpManageFabricSwitchesGet(_EpManageFabricSwitchesBase): """ class_name: Literal["EpManageFabricSwitchesGet"] = Field( - default="EpManageFabricSwitchesGet", description="Class name for backward compatibility" + default="EpManageFabricSwitchesGet", frozen=True, description="Class name for backward compatibility" ) endpoint_params: FabricSwitchesGetEndpointParams = Field( default_factory=FabricSwitchesGetEndpointParams, description="Endpoint-specific query parameters" @@ -228,7 +228,7 @@ class EpManageFabricSwitchesPost(_EpManageFabricSwitchesBase): """ class_name: Literal["EpManageFabricSwitchesPost"] = Field( - default="EpManageFabricSwitchesPost", description="Class name for backward compatibility" + default="EpManageFabricSwitchesPost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: FabricSwitchesAddEndpointParams = Field( 
default_factory=FabricSwitchesAddEndpointParams, description="Endpoint-specific query parameters" diff --git a/plugins/module_utils/models/nd_manage_switches/__init__.py b/plugins/module_utils/models/nd_manage_switches/__init__.py deleted file mode 100644 index 17415a32..00000000 --- a/plugins/module_utils/models/nd_manage_switches/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""nd_manage_switches models package. - -Re-exports all model classes, enums, and validators from their individual -modules so that consumers can import directly from the package: - - from .models.nd_manage_switches import SwitchConfigModel, SwitchRole, ... -""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -# --- Enums --- -from .enums import ( # noqa: F401 - AdvisoryLevel, - AnomalyLevel, - ConfigSyncStatus, - DiscoveryStatus, - PlatformType, - RemoteCredentialStore, - SnmpV3AuthProtocol, - SwitchRole, - SystemMode, - VpcRole, -) - -# --- Validators --- -from .validators import SwitchValidators # noqa: F401 - -# --- Nested / shared models --- -from .switch_data_models import ( # noqa: F401 - AdditionalAciSwitchData, - AdditionalSwitchData, - Metadata, - SwitchMetadata, - TelemetryIpCollection, - VpcData, -) - -# --- Discovery models --- -from .discovery_models import ( # noqa: F401 - AddSwitchesRequestModel, - ShallowDiscoveryRequestModel, - SwitchDiscoveryModel, -) - -# --- Switch data models --- -from .switch_data_models import ( # noqa: F401 - SwitchDataModel, -) - -# --- Bootstrap models --- -from .bootstrap_models import ( # noqa: F401 - BootstrapBaseData, - BootstrapBaseModel, - BootstrapCredentialModel, - BootstrapImportSpecificModel, - BootstrapImportSwitchModel, - ImportBootstrapSwitchesRequestModel, -) - -# --- Preprovision models --- -from .preprovision_models 
import ( # noqa: F401 - PreProvisionSwitchesRequestModel, - PreProvisionSwitchModel, -) - -# --- RMA models --- -from .rma_models import ( # noqa: F401 - RMASwitchModel, -) - -# --- Switch actions models --- -from .switch_actions_models import ( # noqa: F401 - ChangeSwitchSerialNumberRequestModel, - SwitchCredentialsRequestModel, -) - -# --- Config / playbook models --- -from .config_models import ( # noqa: F401 - ConfigDataModel, - POAPConfigModel, - RMAConfigModel, - SwitchConfigModel, -) - - -__all__ = [ - # Enums - "AdvisoryLevel", - "AnomalyLevel", - "ConfigSyncStatus", - "DiscoveryStatus", - "PlatformType", - "RemoteCredentialStore", - "SnmpV3AuthProtocol", - "SwitchRole", - "SystemMode", - "VpcRole", - # Validators - "SwitchValidators", - # Nested models - "AdditionalAciSwitchData", - "AdditionalSwitchData", - "Metadata", - "SwitchMetadata", - "TelemetryIpCollection", - "VpcData", - # Discovery models - "AddSwitchesRequestModel", - "ShallowDiscoveryRequestModel", - "SwitchDiscoveryModel", - # Switch data models - "SwitchDataModel", - # Bootstrap models - "BootstrapBaseData", - "BootstrapBaseModel", - "BootstrapCredentialModel", - "BootstrapImportSpecificModel", - "BootstrapImportSwitchModel", - "ImportBootstrapSwitchesRequestModel", - # Preprovision models - "PreProvisionSwitchesRequestModel", - "PreProvisionSwitchModel", - # RMA models - "RMASwitchModel", - # Switch actions models - "ChangeSwitchSerialNumberRequestModel", - "SwitchCredentialsRequestModel", - # Config models - "ConfigDataModel", - "POAPConfigModel", - "RMAConfigModel", - "SwitchConfigModel", -] diff --git a/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py b/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py index 9825c0c3..62dbcfd4 100644 --- a/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat 
C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """Bootstrap (POAP) switch models for import operations. -Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. """ from __future__ import absolute_import, division, print_function @@ -20,12 +20,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel -from .enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( RemoteCredentialStore, SnmpV3AuthProtocol, SwitchRole, ) -from .validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators class BootstrapBaseData(NDNestedModel): @@ -243,17 +243,17 @@ class BootstrapImportSwitchModel(NDBaseModel): ..., description="Model of the bootstrap switch" ) - version: str = Field( + software_version: str = Field( ..., + alias="softwareVersion", description="Software version of the bootstrap switch" ) hostname: str = Field( ..., description="Hostname of the bootstrap switch" ) - ip_address: str = Field( + ip: str = Field( ..., - alias="ipAddress", description="IP address of the bootstrap switch" ) password: str = Field( @@ -288,6 +288,7 @@ class BootstrapImportSwitchModel(NDBaseModel): ) fingerprint: str = Field( default="", + alias="fingerPrint", description="SSH fingerprint from bootstrap GET API" ) public_key: str = Field( @@ -298,7 +299,7 @@ class BootstrapImportSwitchModel(NDBaseModel): re_add: bool = Field( default=False, alias="reAdd", - description="Re-add flag from bootstrap GET API" + description="Whether to re-add an already-seen switch" ) in_inventory: bool = Field( default=False, @@ 
-313,24 +314,15 @@ class BootstrapImportSwitchModel(NDBaseModel): default=None, alias="switchRole" ) - ip: Optional[str] = Field( - default=None, - description="IP address (duplicate of ipAddress for API compatibility)" - ) - software_version: Optional[str] = Field( - default=None, - alias="softwareVersion", - description="Software version (duplicate of version for API compatibility)" - ) - gateway_ip_mask: Optional[str] = Field( - default=None, + gateway_ip_mask: str = Field( + ..., alias="gatewayIpMask", description="Gateway IP address with mask" ) - @field_validator('ip_address', mode='before') + @field_validator('ip', mode='before') @classmethod - def validate_ip_address(cls, v: str) -> str: + def validate_ip_field(cls, v: str) -> str: result = SwitchValidators.validate_ip_address(v) if result is None: raise ValueError(f"Invalid IP address: {v}") diff --git a/plugins/module_utils/models/nd_manage_switches/config_models.py b/plugins/module_utils/models/nd_manage_switches/config_models.py index 4c22acec..40ab587b 100644 --- a/plugins/module_utils/models/nd_manage_switches/config_models.py +++ b/plugins/module_utils/models/nd_manage_switches/config_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -9,7 +9,6 @@ These models represent the user-facing configuration schema used in Ansible playbooks for normal switch addition, POAP, and RMA operations. 
-Based on: dcnm_inventory.py config suboptions """ from __future__ import absolute_import, division, print_function @@ -25,12 +24,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel -from .enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( PlatformType, SnmpV3AuthProtocol, SwitchRole, ) -from .validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators class ConfigDataModel(NDNestedModel): diff --git a/plugins/module_utils/models/nd_manage_switches/discovery_models.py b/plugins/module_utils/models/nd_manage_switches/discovery_models.py index 4e6fb667..df79aaf8 100644 --- a/plugins/module_utils/models/nd_manage_switches/discovery_models.py +++ b/plugins/module_utils/models/nd_manage_switches/discovery_models.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """Switch discovery models for shallow discovery and fabric add operations. -Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
""" from __future__ import absolute_import, division, print_function @@ -19,13 +19,14 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from .enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( PlatformType, RemoteCredentialStore, + ShallowDiscoveryPlatformType, SnmpV3AuthProtocol, SwitchRole, ) -from .validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators class ShallowDiscoveryRequestModel(NDBaseModel): @@ -50,10 +51,10 @@ class ShallowDiscoveryRequestModel(NDBaseModel): le=7, description="Max hop" ) - platform_type: PlatformType = Field( - default=PlatformType.NX_OS, + platform_type: ShallowDiscoveryPlatformType = Field( + default=ShallowDiscoveryPlatformType.NX_OS, alias="platformType", - description="Switch platform type" + description="Switch platform type (apic is not supported for shallow discovery)" ) snmp_v3_auth_protocol: SnmpV3AuthProtocol = Field( default=SnmpV3AuthProtocol.MD5, @@ -102,9 +103,9 @@ def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3A @field_validator('platform_type', mode='before') @classmethod - def normalize_platform(cls, v: Union[str, PlatformType, None]) -> PlatformType: + def normalize_platform(cls, v: Union[str, ShallowDiscoveryPlatformType, None]) -> ShallowDiscoveryPlatformType: """Normalize platform type (case-insensitive).""" - return PlatformType.normalize(v) + return ShallowDiscoveryPlatformType.normalize(v) class SwitchDiscoveryModel(NDBaseModel): diff --git a/plugins/module_utils/models/nd_manage_switches/enums.py b/plugins/module_utils/models/nd_manage_switches/enums.py index 93f93083..b88216ad 100644 --- a/plugins/module_utils/models/nd_manage_switches/enums.py +++ b/plugins/module_utils/models/nd_manage_switches/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S 
(@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -120,7 +120,9 @@ class PlatformType(str, Enum): """ Switch platform type enumeration. - Based on: components/schemas (multiple references) + Used for POST /fabrics/{fabricName}/switches (AddSwitches). + Includes all platform types supported by the add-switches endpoint. + Based on: components/schemas """ NX_OS = "nx-os" OTHER = "other" @@ -150,6 +152,41 @@ def normalize(cls, value: Union[str, "PlatformType", None]) -> "PlatformType": return pt raise ValueError(f"Invalid PlatformType: {value}. Valid: {cls.choices()}") +class ShallowDiscoveryPlatformType(str, Enum): + """ + Platform type for shallow discovery. + + Used for POST /fabrics/{fabricName}/actions/shallowDiscovery only. + Excludes 'apic' which is not supported by the shallowDiscovery endpoint. + Based on: components/schemas/shallowDiscoveryRequest.platformType + """ + NX_OS = "nx-os" + OTHER = "other" + IOS_XE = "ios-xe" + IOS_XR = "ios-xr" + SONIC = "sonic" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + @classmethod + def normalize(cls, value: Union[str, "ShallowDiscoveryPlatformType", None]) -> "ShallowDiscoveryPlatformType": + """ + Normalize input to enum value (case-insensitive). + Accepts: NX_OS, nx-os, NX-OS, ios_xe, ios-xe, etc. + """ + if value is None: + return cls.NX_OS + if isinstance(value, cls): + return value + if isinstance(value, str): + v_normalized = value.lower().replace('_', '-') + for pt in cls: + if pt.value == v_normalized: + return pt + raise ValueError(f"Invalid ShallowDiscoveryPlatformType: {value}. 
Valid: {cls.choices()}") + class SnmpV3AuthProtocol(str, Enum): """ @@ -310,6 +347,7 @@ def choices(cls) -> List[str]: "SwitchRole", "SystemMode", "PlatformType", + "ShallowDiscoveryPlatformType", "SnmpV3AuthProtocol", "DiscoveryStatus", "ConfigSyncStatus", diff --git a/plugins/module_utils/models/nd_manage_switches/preprovision_models.py b/plugins/module_utils/models/nd_manage_switches/preprovision_models.py index 9e34910a..d42daa2d 100644 --- a/plugins/module_utils/models/nd_manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/nd_manage_switches/preprovision_models.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """Pre-provision switch models. -Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
""" from __future__ import absolute_import, division, print_function @@ -20,12 +20,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from .enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( RemoteCredentialStore, SnmpV3AuthProtocol, SwitchRole, ) -from .validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators class PreProvisionSwitchModel(NDBaseModel): diff --git a/plugins/module_utils/models/nd_manage_switches/rma_models.py b/plugins/module_utils/models/nd_manage_switches/rma_models.py index 12f6c891..f1f692fa 100644 --- a/plugins/module_utils/models/nd_manage_switches/rma_models.py +++ b/plugins/module_utils/models/nd_manage_switches/rma_models.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """RMA (Return Material Authorization) switch models. -Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
""" from __future__ import absolute_import, division, print_function @@ -19,12 +19,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from .enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( RemoteCredentialStore, SnmpV3AuthProtocol, SwitchRole, ) -from .validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators class RMASwitchModel(NDBaseModel): """ diff --git a/plugins/module_utils/models/nd_manage_switches/switch_actions_models.py b/plugins/module_utils/models/nd_manage_switches/switch_actions_models.py index 76b207da..18113445 100644 --- a/plugins/module_utils/models/nd_manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/nd_manage_switches/switch_actions_models.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """Switch action models (serial number change, IDs list, credentials). -Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
""" from __future__ import absolute_import, division, print_function @@ -19,7 +19,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from .validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators class SwitchCredentialsRequestModel(NDBaseModel): diff --git a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py index ccfb571f..484815d8 100644 --- a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/nd_manage_switches/switch_data_models.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """Switch inventory data models (API response representations). -Based on OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
""" from __future__ import absolute_import, division, print_function @@ -20,7 +20,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel -from .enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( AdvisoryLevel, AnomalyLevel, ConfigSyncStatus, diff --git a/plugins/module_utils/models/nd_manage_switches/validators.py b/plugins/module_utils/models/nd_manage_switches/validators.py index b2e3a704..e3ceb3a6 100644 --- a/plugins/module_utils/models/nd_manage_switches/validators.py +++ b/plugins/module_utils/models/nd_manage_switches/validators.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index e0fa6ef0..59d4e9ca 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat C S (@achengam) +# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -721,9 +721,9 @@ def build_proposed( ) if existing_match: proposed.append(existing_match) - log.warning( - f"Switch {seed_ip} not discovered but found in existing " - f"inventory — using existing record for comparison" + log.debug( + f"Switch {seed_ip} already in fabric inventory — " + f"using existing record (discovery skipped)" ) continue @@ -1260,6 +1260,43 @@ def handle( f"{len(swap_entries)} swap" ) + # Idempotency: skip entries whose target serial is already in the fabric. 
+ # Build lookup structures for idempotency checks. + # Bootstrap: idempotent when both IP address AND serial number match. + # PreProvision: idempotent when IP address alone matches. + existing_by_ip = { + sw.fabric_management_ip: sw + for sw in existing + if sw.fabric_management_ip + } + + active_bootstrap = [] + for switch_cfg, poap_cfg in bootstrap_entries: + existing_sw = existing_by_ip.get(switch_cfg.seed_ip) + if existing_sw and poap_cfg.serial_number in ( + existing_sw.serial_number, + existing_sw.switch_id, + ): + log.info( + f"Bootstrap: IP '{switch_cfg.seed_ip}' with serial " + f"'{poap_cfg.serial_number}' already in fabric " + f"— idempotent, skipping" + ) + else: + active_bootstrap.append((switch_cfg, poap_cfg)) + bootstrap_entries = active_bootstrap + + active_preprov = [] + for switch_cfg, poap_cfg in preprov_entries: + if switch_cfg.seed_ip in existing_by_ip: + log.info( + f"PreProvision: IP '{switch_cfg.seed_ip}' already in fabric " + f"— idempotent, skipping" + ) + else: + active_preprov.append((switch_cfg, poap_cfg)) + preprov_entries = active_preprov + # Handle swap entries (change serial number on pre-provisioned switches) if swap_entries: self._handle_poap_swap(swap_entries, existing or []) @@ -1476,9 +1513,8 @@ def _build_bootstrap_import_model( bootstrap_model = BootstrapImportSwitchModel( serialNumber=serial_number, model=model, - version=version, hostname=hostname, - ipAddress=ip, + ip=ip, password=password, discoveryAuthProtocol=auth_proto, discoveryUsername=discovery_username, @@ -1490,7 +1526,6 @@ def _build_bootstrap_import_model( inInventory=in_inventory, imagePolicy=image_policy or "", switchRole=switch_role, - ip=ip, softwareVersion=version, gatewayIpMask=gateway_ip_mask, ) @@ -2499,7 +2534,18 @@ def manage_state(self) -> None: return self.rma_handler.handle(proposed_config, list(self.existing)) # Normal: discover → build proposed models → compute diff → delegate - discovered_data = self.discovery.discover(proposed_config) + # Skip 
discovery for switches already in the fabric. + existing_ips = {sw.fabric_management_ip for sw in self.existing} + configs_to_discover = [cfg for cfg in proposed_config if cfg.seed_ip not in existing_ips] + if configs_to_discover: + self.log.info( + f"Discovery needed for {len(configs_to_discover)}/{len(proposed_config)} " + f"switch(es) — {len(proposed_config) - len(configs_to_discover)} already in fabric" + ) + discovered_data = self.discovery.discover(configs_to_discover) + else: + self.log.info("All proposed switches already in fabric — skipping discovery") + discovered_data = {} built = self.discovery.build_proposed( proposed_config, discovered_data, list(self.existing) ) diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index f3d14ca6..6b86a9b7 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -196,12 +196,10 @@ description: - Model of switch to Bootstrap for RMA. type: str - required: true version: description: - Software version of switch to Bootstrap for RMA. type: str - required: true image_policy: description: - Name of the image policy to be applied on switch during Bootstrap for RMA. @@ -213,7 +211,6 @@ - C(models) is list of model of modules in switch to Bootstrap for RMA. - C(gateway) is the gateway IP with mask for the switch to Bootstrap for RMA. type: dict - required: true suboptions: models: description: @@ -229,48 +226,27 @@ - Serial number of new replacement switch. type: str required: true - model: - description: - - Model of new switch. - type: str - required: true - version: - description: - - Software version of new switch. - type: str - required: true - hostname: - description: - - Hostname for the replacement switch. - type: str - required: true - image_policy: - description: - - Image policy to apply. - type: str - required: true - ip: - description: - - IP address of the replacement switch. 
- type: str - required: true - gateway_ip: - description: - - Gateway IP with subnet mask. - type: str - required: true - discovery_password: - description: - - Password for device discovery during RMA. - type: str - required: true + extends_documentation_fragment: - cisco.nd.modules - cisco.nd.check_mode notes: -- This module requires NDFC 12.x or higher. +- This module requires ND 12.x or higher. - POAP operations require POAP and DHCP to be enabled in fabric settings. - RMA operations require the old switch to be in a replaceable state. +- Idempotence for B(Bootstrap) - A bootstrap entry is considered idempotent when + the C(seed_ip) already exists in the fabric inventory B(and) the C(serial_number) + in the POAP config matches the serial number recorded for that IP in inventory. + Both conditions must be true; a matching IP with a different serial is not + treated as idempotent and will attempt the bootstrap again. +- Idempotence for B(Pre-provision) - A pre-provision entry is considered idempotent + when the C(seed_ip) already exists in the fabric inventory, regardless of the + C(preprovision_serial) value. Because the pre-provision serial is a placeholder + that may differ from the real hardware serial, only the IP address is used as + the stable identity for idempotency checks. +- Idempotence for B(normal discovery) - A switch is considered idempotent when + its C(seed_ip) already exists in the fabric inventory with no configuration + drift (same role). """ EXAMPLES = """ From 1569e417f089608bc54b8facc1f099628116784c Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 19 Mar 2026 12:07:59 +0530 Subject: [PATCH 019/109] Change folder structure for models, remove query handling and allow RMA, POAP, Normal Discovery handling in same task. 
--- .../bootstrap_models.py | 4 +- .../config_models.py | 117 ++---------------- .../discovery_models.py | 4 +- .../enums.py | 0 .../preprovision_models.py | 4 +- .../rma_models.py | 4 +- .../switch_actions_models.py | 2 +- .../switch_data_models.py | 2 +- .../validators.py | 0 plugins/module_utils/nd_switch_resources.py | 106 ++++++++-------- plugins/modules/nd_manage_switches.py | 16 +-- 11 files changed, 77 insertions(+), 182 deletions(-) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/bootstrap_models.py (98%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/config_models.py (84%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/discovery_models.py (97%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/enums.py (100%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/preprovision_models.py (97%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/rma_models.py (96%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/switch_actions_models.py (97%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/switch_data_models.py (99%) rename plugins/module_utils/models/{nd_manage_switches => manage_switches}/validators.py (100%) diff --git a/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py similarity index 98% rename from plugins/module_utils/models/nd_manage_switches/bootstrap_models.py rename to plugins/module_utils/models/manage_switches/bootstrap_models.py index 62dbcfd4..0d72ebed 100644 --- a/plugins/module_utils/models/nd_manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -20,12 +20,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from 
ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( RemoteCredentialStore, SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators class BootstrapBaseData(NDNestedModel): diff --git a/plugins/module_utils/models/nd_manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py similarity index 84% rename from plugins/module_utils/models/nd_manage_switches/config_models.py rename to plugins/module_utils/models/manage_switches/config_models.py index 40ab587b..b596ca6f 100644 --- a/plugins/module_utils/models/nd_manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -24,12 +24,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( PlatformType, SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators class ConfigDataModel(NDNestedModel): @@ -337,11 +337,9 @@ class SwitchConfigModel(NDBaseModel): # Fields excluded from diff — only seed_ip + role are compared exclude_from_diff: ClassVar[List[str]] = [ - "user_name", "password", "auth_proto", 
"max_hops", + "user_name", "password", "auth_proto", "preserve_config", "platform_type", "poap", "rma", "operation_type", - "switch_id", "serial_number", "mode", "hostname", - "model", "software_version", ] # Required fields @@ -362,20 +360,12 @@ class SwitchConfigModel(NDBaseModel): default=None, description="Login password to the switch (required for merged/overridden states)" ) - # Optional fields with defaults auth_proto: SnmpV3AuthProtocol = Field( default=SnmpV3AuthProtocol.MD5, alias="authProto", description="Authentication protocol to use" ) - max_hops: int = Field( - default=0, - alias="maxHops", - ge=0, - le=7, - description="Maximum hops to reach the switch (deprecated, defaults to 0)" - ) role: Optional[SwitchRole] = Field( default=None, description="Role to assign to the switch. None means not specified (uses controller default)." @@ -419,35 +409,6 @@ def operation_type(self) -> Literal["normal", "poap", "rma"]: return "rma" return "normal" - # API-derived fields (populated by from_response, never set by users) - switch_id: Optional[str] = Field( - default=None, - alias="switchId", - description="Serial number / switch ID from inventory API" - ) - serial_number: Optional[str] = Field( - default=None, - alias="serialNumber", - description="Serial number from inventory API" - ) - mode: Optional[str] = Field( - default=None, - description="Switch mode from inventory API (Normal, Migration, etc.)" - ) - hostname: Optional[str] = Field( - default=None, - description="Switch hostname from inventory API" - ) - model: Optional[str] = Field( - default=None, - description="Switch model from inventory API" - ) - software_version: Optional[str] = Field( - default=None, - alias="softwareVersion", - description="Software version from inventory API" - ) - def to_config_dict(self) -> Dict[str, Any]: """Return the playbook config as a dict with all credentials stripped. 
@@ -534,10 +495,10 @@ def apply_state_defaults(self, info: ValidationInfo) -> Self: """ state = (info.context or {}).get("state") if info else None - # POAP only allowed with merged or query - if self.poap and state not in (None, "merged", "query"): + # POAP only allowed with merged + if self.poap and state not in (None, "merged"): raise ValueError( - f"POAP operations require 'merged' or 'query' state, " + f"POAP operations require 'merged' state, " f"got '{state}' (switch: {self.seed_ip})" ) @@ -623,76 +584,12 @@ def normalize_platform_type(cls, v: Union[str, PlatformType, None]) -> PlatformT """Normalize platform_type for case-insensitive matching (NX_OS, nx-os, etc.).""" return PlatformType.normalize(v) - @classmethod - def validate_no_mixed_operations( - cls, configs: List["SwitchConfigModel"] - ) -> None: - """Validate that a list of configs does not mix operation types. - - POAP, RMA, and normal switch operations cannot be combined - in the same Ansible task. Call this after validating all - individual configs. - - Args: - configs: List of validated SwitchConfigModel instances. - - Raises: - ValueError: If more than one operation type is present. - """ - op_types = {cfg.operation_type for cfg in configs} - if len(op_types) > 1: - raise ValueError( - "Mixed operation types detected: " - f"{', '.join(sorted(op_types))}. " - "POAP, RMA, and normal switch operations " - "cannot be mixed in the same task. " - "Please separate them into different tasks." - ) - def to_payload(self) -> Dict[str, Any]: - """Convert to API payload format. - - Excludes API-derived fields that are not part of the user config. - """ + """Convert to API payload format.""" return self.model_dump( by_alias=True, exclude_none=True, - exclude={ - "switch_id", "serial_number", "mode", - "hostname", "model", "software_version", - }, - ) - - @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: - """Create model instance from inventory or discovery API response. 
- - Handles two formats: - 1. Inventory API: {switchId, fabricManagementIp, switchRole, ...} - 2. Discovery API: {serialNumber, ip, hostname, ...} - """ - mapped: Dict[str, Any] = {} - - # seed_ip from fabricManagementIp (inventory) or ip (discovery) - ip = response.get("fabricManagementIp") or response.get("ip") - if ip: - mapped["seedIp"] = ip - - # role from switchRole - role = response.get("switchRole") - if role: - mapped["role"] = role - - # Direct API fields - direct_fields = ( - "switchId", "serialNumber", "softwareVersion", - "mode", "hostname", "model", ) - for key in direct_fields: - if key in response and response[key] is not None: - mapped[key] = response[key] - - return cls.model_validate(mapped) __all__ = [ diff --git a/plugins/module_utils/models/nd_manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py similarity index 97% rename from plugins/module_utils/models/nd_manage_switches/discovery_models.py rename to plugins/module_utils/models/manage_switches/discovery_models.py index df79aaf8..dfe190f0 100644 --- a/plugins/module_utils/models/nd_manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -19,14 +19,14 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( PlatformType, RemoteCredentialStore, ShallowDiscoveryPlatformType, SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators class ShallowDiscoveryRequestModel(NDBaseModel): diff --git a/plugins/module_utils/models/nd_manage_switches/enums.py 
b/plugins/module_utils/models/manage_switches/enums.py similarity index 100% rename from plugins/module_utils/models/nd_manage_switches/enums.py rename to plugins/module_utils/models/manage_switches/enums.py diff --git a/plugins/module_utils/models/nd_manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py similarity index 97% rename from plugins/module_utils/models/nd_manage_switches/preprovision_models.py rename to plugins/module_utils/models/manage_switches/preprovision_models.py index d42daa2d..ba073824 100644 --- a/plugins/module_utils/models/nd_manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -20,12 +20,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( RemoteCredentialStore, SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators class PreProvisionSwitchModel(NDBaseModel): diff --git a/plugins/module_utils/models/nd_manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py similarity index 96% rename from plugins/module_utils/models/nd_manage_switches/rma_models.py rename to plugins/module_utils/models/manage_switches/rma_models.py index f1f692fa..7760d11b 100644 --- a/plugins/module_utils/models/nd_manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -19,12 +19,12 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from 
ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( RemoteCredentialStore, SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators class RMASwitchModel(NDBaseModel): """ diff --git a/plugins/module_utils/models/nd_manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py similarity index 97% rename from plugins/module_utils/models/nd_manage_switches/switch_actions_models.py rename to plugins/module_utils/models/manage_switches/switch_actions_models.py index 18113445..5f903e65 100644 --- a/plugins/module_utils/models/nd_manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -19,7 +19,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators class SwitchCredentialsRequestModel(NDBaseModel): diff --git a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py similarity index 99% rename from plugins/module_utils/models/nd_manage_switches/switch_data_models.py rename to plugins/module_utils/models/manage_switches/switch_data_models.py index 484815d8..e4de26cb 100644 --- a/plugins/module_utils/models/nd_manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -20,7 +20,7 @@ from 
ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches.enums import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( AdvisoryLevel, AnomalyLevel, ConfigSyncStatus, diff --git a/plugins/module_utils/models/nd_manage_switches/validators.py b/plugins/module_utils/models/manage_switches/validators.py similarity index 100% rename from plugins/module_utils/models/nd_manage_switches/validators.py rename to plugins/module_utils/models/manage_switches/validators.py diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 59d4e9ca..638e870d 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -26,7 +26,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results -from ansible_collections.cisco.nd.plugins.module_utils.models.nd_manage_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches import ( SwitchRole, SnmpV3AuthProtocol, PlatformType, @@ -211,17 +211,6 @@ def validate_configs( log.warning("No valid configurations found in input") return validated_configs - # Cross-config check — model can't do this per-instance - try: - SwitchConfigModel.validate_no_mixed_operations(validated_configs) - except ValueError as e: - error_msg = str(e) - log.error(error_msg) - if hasattr(nd, 'module'): - nd.module.fail_json(msg=error_msg) - else: - raise - # Duplicate seed_ip check seen_ips: set = set() duplicate_ips: set = set() @@ -240,14 +229,14 @@ def validate_configs( else: raise 
ValueError(error_msg) - operation_type = validated_configs[0].operation_type + operation_types = {c.operation_type for c in validated_configs} log.info( f"Successfully validated {len(validated_configs)} " - f"configuration(s) with operation type: {operation_type}" + f"configuration(s) with operation type(s): {operation_types}" ) log.debug( f"EXIT: validate_configs() -> " - f"{len(validated_configs)} configs, operation_type={operation_type}" + f"{len(validated_configs)} configs, operation_types={operation_types}" ) return validated_configs @@ -2487,7 +2476,7 @@ def manage_state(self) -> None: """Dispatch the requested module state to the appropriate workflow. This method validates input, routes POAP and RMA operations to dedicated - handlers, and executes state-specific orchestration for query, merged, + handlers, and executes state-specific orchestration for merged, overridden, and deleted operations. Returns: @@ -2525,43 +2514,58 @@ def manage_state(self) -> None: model_class=SwitchConfigModel, items=proposed_config ) ) - self.operation_type = proposed_config[0].operation_type - - # POAP and RMA bypass normal discovery — delegate to handlers - if self.operation_type == "poap": - return self.poap_handler.handle(proposed_config, list(self.existing)) - if self.operation_type == "rma": - return self.rma_handler.handle(proposed_config, list(self.existing)) - - # Normal: discover → build proposed models → compute diff → delegate - # Skip discovery for switches already in the fabric. 
- existing_ips = {sw.fabric_management_ip for sw in self.existing} - configs_to_discover = [cfg for cfg in proposed_config if cfg.seed_ip not in existing_ips] - if configs_to_discover: - self.log.info( - f"Discovery needed for {len(configs_to_discover)}/{len(proposed_config)} " - f"switch(es) — {len(proposed_config) - len(configs_to_discover)} already in fabric" - ) - discovered_data = self.discovery.discover(configs_to_discover) - else: - self.log.info("All proposed switches already in fabric — skipping discovery") - discovered_data = {} - built = self.discovery.build_proposed( - proposed_config, discovered_data, list(self.existing) - ) - self.proposed = NDConfigCollection(model_class=SwitchDataModel, items=built) - diff = SwitchDiffEngine.compute_changes( - list(self.proposed), list(self.existing), self.log + # Partition configs by operation type + poap_configs = [c for c in proposed_config if c.operation_type == "poap"] + rma_configs = [c for c in proposed_config if c.operation_type == "rma"] + normal_configs = [c for c in proposed_config if c.operation_type not in ("poap", "rma")] + + self.log.info( + f"Config partition: {len(normal_configs)} normal, " + f"{len(poap_configs)} poap, {len(rma_configs)} rma" ) - state_handlers = { - "merged": self._handle_merged_state, - "overridden": self._handle_overridden_state, - } - handler = state_handlers.get(self.state) - if handler is None: - self.nd.module.fail_json(msg=f"Unsupported state: {self.state}") - return handler(diff, proposed_config, discovered_data) + # POAP and RMA are only valid with state=merged + if (poap_configs or rma_configs) and self.state != "merged": + self.nd.module.fail_json( + msg="POAP and RMA configs are only supported with state=merged" + ) + + # Normal discovery runs first so the fabric inventory is up to date + # before POAP/RMA handlers execute. 
+ if normal_configs: + existing_ips = {sw.fabric_management_ip for sw in self.existing} + configs_to_discover = [cfg for cfg in normal_configs if cfg.seed_ip not in existing_ips] + if configs_to_discover: + self.log.info( + f"Discovery needed for {len(configs_to_discover)}/{len(normal_configs)} " + f"switch(es) — {len(normal_configs) - len(configs_to_discover)} already in fabric" + ) + discovered_data = self.discovery.discover(configs_to_discover) + else: + self.log.info("All proposed switches already in fabric — skipping discovery") + discovered_data = {} + built = self.discovery.build_proposed( + normal_configs, discovered_data, list(self.existing) + ) + self.proposed = NDConfigCollection(model_class=SwitchDataModel, items=built) + diff = SwitchDiffEngine.compute_changes( + list(self.proposed), list(self.existing), self.log + ) + + state_handlers = { + "merged": self._handle_merged_state, + "overridden": self._handle_overridden_state, + } + handler = state_handlers.get(self.state) + if handler is None: + self.nd.module.fail_json(msg=f"Unsupported state: {self.state}") + handler(diff, normal_configs, discovered_data) + + # POAP and RMA run after normal discovery + if poap_configs: + self.poap_handler.handle(poap_configs, list(self.existing)) + if rma_configs: + self.rma_handler.handle(rma_configs, list(self.existing)) # ===================================================================== # State Handlers (orchestration only — delegate to services) diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 6b86a9b7..df0a53d7 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -17,7 +17,7 @@ version_added: "1.0.0" author: Akshayanat Chengam Saravanan (@achengam) description: -- Add, delete, override, and query switches in Cisco Nexus Dashboard. +- Add, delete, and override switches in Cisco Nexus Dashboard. 
- Supports normal discovery, POAP (bootstrap/preprovision), and RMA operations. - Uses Pydantic model validation for switch configurations. - Provides state-based operations with intelligent diff calculation. @@ -30,7 +30,7 @@ state: description: - The state of ND and switch(es) after module completion. - - C(merged) and C(query) are the only states supported for POAP. + - C(merged) is the only state supported for POAP. - C(merged) is the only state supported for RMA. type: str default: merged @@ -38,7 +38,6 @@ - merged - overridden - deleted - - query save: description: - Save/Recalculate the configuration of the fabric after inventory is updated. @@ -344,11 +343,6 @@ - seed_ip: 192.168.10.202 state: deleted -- name: Query all switches in fabric - cisco.nd.nd_manage_switches: - fabric: my-fabric - state: query - register: switches_result """ RETURN = """ @@ -364,7 +358,7 @@ elements: dict sent: description: The configuration sent to the API. - returned: when state is not query + returned: always type: list elements: dict current: @@ -479,7 +473,7 @@ def main(): state=dict( type="str", default="merged", - choices=["merged", "overridden", "deleted", "query"] + choices=["merged", "overridden", "deleted"] ), ) @@ -529,7 +523,7 @@ def main(): ) log.info(f"NDSwitchResourceModule initialized for fabric: {fabric}") - # Manage state for merged, overridden, deleted, query + # Manage state for merged, overridden, deleted log.info(f"Managing state: {state}") sw_module.manage_state() From 824b6c9fe2a27839bd2e127bc120e6461cb81073 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 19 Mar 2026 15:25:24 +0530 Subject: [PATCH 020/109] Fixing paths, docstrings, class names, adding UT for endpoints --- ...ials.py => manage_credentials_switches.py} | 4 +- .../fabric_config.py => manage_fabrics.py} | 104 +-- .../v1/manage/manage_fabrics_actions.py | 139 ++++ ...otstrap.py => manage_fabrics_bootstrap.py} | 24 +- ...scovery.py => manage_fabrics_inventory.py} | 40 +- ...ons.py => 
manage_fabrics_switchactions.py} | 256 +------- .../v1/manage/manage_fabrics_switches.py | 449 +++++++++++++ .../nd_manage_switches/fabric_switches.py | 256 -------- .../models/manage_switches/__init__.py | 141 ++++ .../manage_switches/bootstrap_models.py | 2 +- .../models/manage_switches/config_models.py | 2 +- .../manage_switches/discovery_models.py | 2 +- .../models/manage_switches/enums.py | 2 +- .../manage_switches/preprovision_models.py | 2 +- .../models/manage_switches/rma_models.py | 2 +- .../manage_switches/switch_actions_models.py | 2 +- .../manage_switches/switch_data_models.py | 2 +- .../models/manage_switches/validators.py | 2 +- plugins/module_utils/nd_switch_resources.py | 48 +- .../__init__.py | 10 +- .../bootstrap_utils.py | 8 +- .../exceptions.py | 2 +- .../fabric_utils.py | 10 +- .../payload_utils.py | 2 +- .../switch_helpers.py | 2 +- .../switch_wait_utils.py | 20 +- plugins/modules/nd_manage_switches.py | 6 +- ...ints_api_v1_manage_credentials_switches.py | 177 +++++ .../test_endpoints_api_v1_manage_fabrics.py | 271 ++++++++ ...endpoints_api_v1_manage_fabrics_actions.py | 162 +++++ ...dpoints_api_v1_manage_fabrics_bootstrap.py | 206 ++++++ ...dpoints_api_v1_manage_fabrics_inventory.py | 92 +++ ...nts_api_v1_manage_fabrics_switchactions.py | 491 ++++++++++++++ ...ndpoints_api_v1_manage_fabrics_switches.py | 614 ++++++++++++++++++ 34 files changed, 2874 insertions(+), 678 deletions(-) rename plugins/module_utils/endpoints/v1/manage/{nd_manage_switches/credentials.py => manage_credentials_switches.py} (96%) rename plugins/module_utils/endpoints/v1/manage/{nd_manage_switches/fabric_config.py => manage_fabrics.py} (66%) create mode 100644 plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py rename plugins/module_utils/endpoints/v1/manage/{nd_manage_switches/fabric_bootstrap.py => manage_fabrics_bootstrap.py} (81%) rename plugins/module_utils/endpoints/v1/manage/{nd_manage_switches/fabric_discovery.py => manage_fabrics_inventory.py} 
(57%) rename plugins/module_utils/endpoints/v1/manage/{nd_manage_switches/fabric_switch_actions.py => manage_fabrics_switchactions.py} (61%) create mode 100644 plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py delete mode 100644 plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py create mode 100644 plugins/module_utils/models/manage_switches/__init__.py rename plugins/module_utils/utils/{nd_manage_switches => manage_switches}/__init__.py (71%) rename plugins/module_utils/utils/{nd_manage_switches => manage_switches}/bootstrap_utils.py (92%) rename plugins/module_utils/utils/{nd_manage_switches => manage_switches}/exceptions.py (82%) rename plugins/module_utils/utils/{nd_manage_switches => manage_switches}/fabric_utils.py (94%) rename plugins/module_utils/utils/{nd_manage_switches => manage_switches}/payload_utils.py (96%) rename plugins/module_utils/utils/{nd_manage_switches => manage_switches}/switch_helpers.py (97%) rename plugins/module_utils/utils/{nd_manage_switches => manage_switches}/switch_wait_utils.py (97%) create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_credentials_switches.py create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py b/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py similarity index 96% rename from 
plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py rename to plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py index ae40a17a..9609dc99 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/credentials.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ @@ -20,7 +20,7 @@ # pylint: disable=invalid-name __metaclass__ = type -__author__ = "Akshayanat Chengam Saravanan" +__author__ = "Akshayanat C S" # pylint: enable=invalid-name from typing import Literal diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py similarity index 66% rename from plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py rename to plugins/module_utils/endpoints/v1/manage/manage_fabrics.py index b8a5d906..6541dccc 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_config.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py @@ -1,26 +1,24 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ -ND Manage Fabric Config endpoint models. +ND Manage Fabrics endpoint models. -This module contains endpoint definitions for fabric configuration operations +This module contains endpoint definitions for fabric-level operations in the ND Manage API. 
Endpoints covered: -- Config save (recalculate) - Config deploy - Get fabric info -- Inventory discover status """ from __future__ import absolute_import, annotations, division, print_function # pylint: disable=invalid-name __metaclass__ = type -__author__ = "Akshayanat Chengam Saravanan" +__author__ = "Akshayanat C S" # pylint: enable=invalid-name from typing import Literal, Optional @@ -67,9 +65,9 @@ class FabricConfigDeployEndpointParams(EndpointQueryParams): incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") -class _EpManageFabricConfigBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricsBase(FabricNameMixin, NDEndpointBaseModel): """ - Base class for Fabric Config endpoints. + Base class for Fabrics endpoints. Provides common functionality for all HTTP methods on the /api/v1/manage/fabrics/{fabricName} endpoint family. @@ -83,50 +81,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name) -class EpManageFabricConfigSavePost(_EpManageFabricConfigBase): - """ - # Summary - - Fabric Config Save Endpoint - - ## Description - - Endpoint to save (recalculate) fabric configuration. 
- - ## Path - - - /api/v1/manage/fabrics/{fabricName}/actions/configSave - - ## Verb - - - POST - - ## Usage - - ```python - request = EpManageFabricConfigSavePost() - request.fabric_name = "MyFabric" - path = request.path - verb = request.verb - ``` - """ - - class_name: Literal["EpManageFabricConfigSavePost"] = Field( - default="EpManageFabricConfigSavePost", frozen=True, description="Class name for backward compatibility" - ) - - @property - def path(self) -> str: - """Build the endpoint path.""" - return f"{self._base_path}/actions/configSave" - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.POST - - -class EpManageFabricConfigDeployPost(_EpManageFabricConfigBase): +class EpManageFabricConfigDeployPost(_EpManageFabricsBase): """ # Summary @@ -199,7 +154,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class EpManageFabricGet(_EpManageFabricConfigBase): +class EpManageFabricGet(_EpManageFabricsBase): """ # Summary @@ -240,46 +195,3 @@ def path(self) -> str: def verb(self) -> HttpVerbEnum: """Return the HTTP verb for this endpoint.""" return HttpVerbEnum.GET - - -class EpManageFabricInventoryDiscoverGet(_EpManageFabricConfigBase): - """ - # Summary - - Fabric Inventory Discover Endpoint - - ## Description - - Endpoint to get discovery status for switches in a fabric. 
- - ## Path - - - /api/v1/manage/fabrics/{fabricName}/inventory/discover - - ## Verb - - - GET - - ## Usage - - ```python - request = EpManageFabricInventoryDiscoverGet() - request.fabric_name = "MyFabric" - path = request.path - verb = request.verb - ``` - """ - - class_name: Literal["EpManageFabricInventoryDiscoverGet"] = Field( - default="EpManageFabricInventoryDiscoverGet", frozen=True, description="Class name for backward compatibility" - ) - - @property - def path(self) -> str: - """Build the endpoint path.""" - return f"{self._base_path}/inventory/discover" - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.GET diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py new file mode 100644 index 00000000..5c2a72bb --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Discovery endpoint models. + +This module contains endpoint definitions for switch discovery operations +within fabrics in the ND Manage API. 
+ +Endpoints covered: +- Shallow discovery +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + + +class _EpManageFabricsActionsBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for Fabric Actions endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/actions endpoint. + """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "actions") + + +class EpManageFabricsActionsShallowDiscoveryPost(_EpManageFabricsActionsBase): + """ + # Summary + + Shallow Discovery Endpoint + + ## Description + + Endpoint to shallow discover switches given seed switches with hop count. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/actions/shallowDiscovery + + ## Verb + + - POST + + ## Usage + + ```python + request = EpManageFabricsActionsShallowDiscoveryPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpManageFabricsActionsShallowDiscoveryPost"] = Field( + default="EpManageFabricsActionsShallowDiscoveryPost", frozen=True, description="Class name for backward compatibility" + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + return f"{self._base_path}/shallowDiscovery" + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpManageFabricsActionsConfigSavePost(_EpManageFabricsActionsBase): + """ + # Summary + + Fabric Config Save Endpoint + + ## Description + + Endpoint to save (recalculate) fabric configuration. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/actions/configSave + + ## Verb + + - POST + + ## Usage + + ```python + request = EpManageFabricsActionsConfigSavePost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpManageFabricsActionsConfigSavePost"] = Field( + default="EpManageFabricsActionsConfigSavePost", frozen=True, description="Class name for backward compatibility" + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + return f"{self._base_path}/configSave" + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py similarity index 81% rename from plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py rename to plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py index 
5bef2ff5..89dcb6a8 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_bootstrap.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ @@ -17,7 +17,7 @@ # pylint: disable=invalid-name __metaclass__ = type -__author__ = "Akshayanat Chengam Saravanan" +__author__ = "Akshayanat C S" # pylint: enable=invalid-name from typing import Literal, Optional @@ -43,7 +43,7 @@ ) -class FabricBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): +class FabricsBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): """ # Summary @@ -58,14 +58,14 @@ class FabricBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, Endpoint ## Usage ```python - params = FabricBootstrapEndpointParams(max=50, offset=0) + params = FabricsBootstrapEndpointParams(max=50, offset=0) query_string = params.to_query_string() # Returns: "max=50&offset=0" ``` """ -class _EpManageFabricBootstrapBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricsBootstrapBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Bootstrap endpoints. 
@@ -81,7 +81,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name, "bootstrap") -class EpManageFabricBootstrapGet(_EpManageFabricBootstrapBase): +class EpManageFabricsBootstrapGet(_EpManageFabricsBootstrapBase): """ # Summary @@ -110,13 +110,13 @@ class EpManageFabricBootstrapGet(_EpManageFabricBootstrapBase): ```python # List all bootstrap switches - request = EpManageFabricBootstrapGet() + request = EpManageFabricsBootstrapGet() request.fabric_name = "MyFabric" path = request.path verb = request.verb # List with pagination - request = EpManageFabricBootstrapGet() + request = EpManageFabricsBootstrapGet() request.fabric_name = "MyFabric" request.endpoint_params.max = 50 request.endpoint_params.offset = 0 @@ -126,11 +126,11 @@ class EpManageFabricBootstrapGet(_EpManageFabricBootstrapBase): ``` """ - class_name: Literal["EpManageFabricBootstrapGet"] = Field( - default="EpManageFabricBootstrapGet", frozen=True, description="Class name for backward compatibility" + class_name: Literal["EpManageFabricsBootstrapGet"] = Field( + default="EpManageFabricsBootstrapGet", frozen=True, description="Class name for backward compatibility" ) - endpoint_params: FabricBootstrapEndpointParams = Field( - default_factory=FabricBootstrapEndpointParams, description="Endpoint-specific query parameters" + endpoint_params: FabricsBootstrapEndpointParams = Field( + default_factory=FabricsBootstrapEndpointParams, description="Endpoint-specific query parameters" ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py similarity index 57% rename from plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py rename to plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py index 8bfc45d9..5cad5a42 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_discovery.py +++ 
b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py @@ -1,23 +1,23 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ -ND Manage Fabric Discovery endpoint models. +ND Manage Fabrics Inventory endpoint models. -This module contains endpoint definitions for switch discovery operations -within fabrics in the ND Manage API. +This module contains endpoint definitions for fabric inventory operations +in the ND Manage API. Endpoints covered: -- Shallow discovery +- Inventory discover status """ from __future__ import absolute_import, annotations, division, print_function # pylint: disable=invalid-name __metaclass__ = type -__author__ = "Akshayanat Chengam Saravanan" +__author__ = "Akshayanat C S" # pylint: enable=invalid-name from typing import Literal @@ -37,12 +37,12 @@ ) -class _EpManageFabricDiscoveryBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricsInventoryBase(FabricNameMixin, NDEndpointBaseModel): """ - Base class for Fabric Discovery endpoints. + Base class for Fabric Inventory endpoints. Provides common functionality for all HTTP methods on the - /api/v1/manage/fabrics/{fabricName}/actions/shallowDiscovery endpoint. + /api/v1/manage/fabrics/{fabricName}/inventory endpoint family. 
""" @property @@ -50,47 +50,47 @@ def _base_path(self) -> str: """Build the base endpoint path.""" if self.fabric_name is None: raise ValueError("fabric_name must be set before accessing path") - return BasePath.path("fabrics", self.fabric_name, "actions", "shallowDiscovery") + return BasePath.path("fabrics", self.fabric_name) -class EpManageFabricShallowDiscoveryPost(_EpManageFabricDiscoveryBase): +class EpManageFabricsInventoryDiscoverGet(_EpManageFabricsInventoryBase): """ # Summary - Shallow Discovery Endpoint + Fabric Inventory Discover Endpoint ## Description - Endpoint to shallow discover switches given seed switches with hop count. + Endpoint to get discovery status for switches in a fabric. ## Path - - /api/v1/manage/fabrics/{fabricName}/actions/shallowDiscovery + - /api/v1/manage/fabrics/{fabricName}/inventory/discover ## Verb - - POST + - GET ## Usage ```python - request = EpManageFabricShallowDiscoveryPost() + request = EpManageFabricsInventoryDiscoverGet() request.fabric_name = "MyFabric" path = request.path verb = request.verb ``` """ - class_name: Literal["EpManageFabricShallowDiscoveryPost"] = Field( - default="EpManageFabricShallowDiscoveryPost", frozen=True, description="Class name for backward compatibility" + class_name: Literal["EpManageFabricsInventoryDiscoverGet"] = Field( + default="EpManageFabricsInventoryDiscoverGet", frozen=True, description="Class name for backward compatibility" ) @property def path(self) -> str: """Build the endpoint path.""" - return self._base_path + return f"{self._base_path}/inventory/discover" @property def verb(self) -> HttpVerbEnum: """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.POST + return HttpVerbEnum.GET diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py similarity index 61% rename from 
plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py rename to plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py index 5a455091..7613140d 100644 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switch_actions.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ @@ -14,15 +14,14 @@ - Change switch roles (bulk) - Import bootstrap (POAP) - Pre-provision switches -- Provision RMA -- Change switch serial number +- Rediscover switches """ from __future__ import absolute_import, annotations, division, print_function # pylint: disable=invalid-name __metaclass__ = type -__author__ = "Akshayanat Chengam Saravanan" +__author__ = "Akshayanat C S" # pylint: enable=invalid-name from typing import Literal, Optional @@ -31,7 +30,6 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( ClusterNameMixin, FabricNameMixin, - SwitchSerialNumberMixin, TicketIdMixin, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( @@ -122,7 +120,7 @@ class SwitchActionsImportEndpointParams(ClusterNameMixin, TicketIdMixin, Endpoin # ============================================================================ -class _EpManageFabricSwitchActionsBase(FabricNameMixin, NDEndpointBaseModel): +class _EpManageFabricsSwitchActionsBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for Fabric Switch Actions endpoints. 
@@ -138,7 +136,7 @@ def _base_path(self) -> str: return BasePath.path("fabrics", self.fabric_name, "switchActions") -class EpManageFabricSwitchActionsRemovePost(_EpManageFabricSwitchActionsBase): +class EpManageFabricsSwitchActionsRemovePost(_EpManageFabricsSwitchActionsBase): """ # Summary @@ -166,13 +164,13 @@ class EpManageFabricSwitchActionsRemovePost(_EpManageFabricSwitchActionsBase): ```python # Remove switches - request = EpManageFabricSwitchActionsRemovePost() + request = EpManageFabricsSwitchActionsRemovePost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Remove switches with force and ticket - request = EpManageFabricSwitchActionsRemovePost() + request = EpManageFabricsSwitchActionsRemovePost() request.fabric_name = "MyFabric" request.endpoint_params.force = True request.endpoint_params.ticket_id = "CHG12345" @@ -182,8 +180,8 @@ class EpManageFabricSwitchActionsRemovePost(_EpManageFabricSwitchActionsBase): ``` """ - class_name: Literal["EpManageFabricSwitchActionsRemovePost"] = Field( - default="EpManageFabricSwitchActionsRemovePost", frozen=True, description="Class name for backward compatibility" + class_name: Literal["EpManageFabricsSwitchActionsRemovePost"] = Field( + default="EpManageFabricsSwitchActionsRemovePost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: SwitchActionsRemoveEndpointParams = Field( default_factory=SwitchActionsRemoveEndpointParams, description="Endpoint-specific query parameters" @@ -212,7 +210,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class EpManageFabricSwitchActionsChangeRolesPost(_EpManageFabricSwitchActionsBase): +class EpManageFabricsSwitchActionsChangeRolesPost(_EpManageFabricsSwitchActionsBase): """ # Summary @@ -239,13 +237,13 @@ class EpManageFabricSwitchActionsChangeRolesPost(_EpManageFabricSwitchActionsBas ```python # Change roles - request = EpManageFabricSwitchActionsChangeRolesPost() + request = 
EpManageFabricsSwitchActionsChangeRolesPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Change roles with change control ticket - request = EpManageFabricSwitchActionsChangeRolesPost() + request = EpManageFabricsSwitchActionsChangeRolesPost() request.fabric_name = "MyFabric" request.endpoint_params.ticket_id = "CHG12345" path = request.path @@ -254,8 +252,8 @@ class EpManageFabricSwitchActionsChangeRolesPost(_EpManageFabricSwitchActionsBas ``` """ - class_name: Literal["EpManageFabricSwitchActionsChangeRolesPost"] = Field( - default="EpManageFabricSwitchActionsChangeRolesPost", frozen=True, + class_name: Literal["EpManageFabricsSwitchActionsChangeRolesPost"] = Field( + default="EpManageFabricsSwitchActionsChangeRolesPost", frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( @@ -285,7 +283,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class EpManageFabricSwitchActionsImportBootstrapPost(_EpManageFabricSwitchActionsBase): +class EpManageFabricsSwitchActionsImportBootstrapPost(_EpManageFabricsSwitchActionsBase): """ # Summary @@ -313,13 +311,13 @@ class EpManageFabricSwitchActionsImportBootstrapPost(_EpManageFabricSwitchAction ```python # Import bootstrap switches - request = EpManageFabricSwitchActionsImportBootstrapPost() + request = EpManageFabricsSwitchActionsImportBootstrapPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Import with cluster and ticket - request = EpManageFabricSwitchActionsImportBootstrapPost() + request = EpManageFabricsSwitchActionsImportBootstrapPost() request.fabric_name = "MyFabric" request.endpoint_params.cluster_name = "cluster1" request.endpoint_params.ticket_id = "CHG12345" @@ -329,8 +327,8 @@ class EpManageFabricSwitchActionsImportBootstrapPost(_EpManageFabricSwitchAction ``` """ - class_name: Literal["EpManageFabricSwitchActionsImportBootstrapPost"] = Field( - 
default="EpManageFabricSwitchActionsImportBootstrapPost", frozen=True, description="Class name for backward compatibility" + class_name: Literal["EpManageFabricsSwitchActionsImportBootstrapPost"] = Field( + default="EpManageFabricsSwitchActionsImportBootstrapPost", frozen=True, description="Class name for backward compatibility" ) endpoint_params: SwitchActionsImportEndpointParams = Field( default_factory=SwitchActionsImportEndpointParams, description="Endpoint-specific query parameters" @@ -364,7 +362,7 @@ def verb(self) -> HttpVerbEnum: # ============================================================================ -class EpManageFabricSwitchActionsPreProvisionPost(_EpManageFabricSwitchActionsBase): +class EpManageFabricsSwitchActionsPreProvisionPost(_EpManageFabricsSwitchActionsBase): """ # Summary @@ -395,13 +393,13 @@ class EpManageFabricSwitchActionsPreProvisionPost(_EpManageFabricSwitchActionsBa ```python # Pre-provision switches - request = EpManageFabricSwitchActionsPreProvisionPost() + request = EpManageFabricsSwitchActionsPreProvisionPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Pre-provision with cluster and ticket - request = EpManageFabricSwitchActionsPreProvisionPost() + request = EpManageFabricsSwitchActionsPreProvisionPost() request.fabric_name = "MyFabric" request.endpoint_params.cluster_name = "cluster1" request.endpoint_params.ticket_id = "CHG12345" @@ -411,8 +409,8 @@ class EpManageFabricSwitchActionsPreProvisionPost(_EpManageFabricSwitchActionsBa ``` """ - class_name: Literal["EpManageFabricSwitchActionsPreProvisionPost"] = Field( - default="EpManageFabricSwitchActionsPreProvisionPost", frozen=True, + class_name: Literal["EpManageFabricsSwitchActionsPreProvisionPost"] = Field( + default="EpManageFabricsSwitchActionsPreProvisionPost", frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsImportEndpointParams = Field( @@ -442,208 +440,12 @@ def verb(self) -> 
HttpVerbEnum: return HttpVerbEnum.POST -# ============================================================================ -# RMA (Return Material Authorization) Endpoints -# ============================================================================ - - -class _EpManageFabricSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): - """ - Base class for per-switch action endpoints. - - Provides common functionality for all HTTP methods on the - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions endpoint. - """ - - @property - def _base_path(self) -> str: - """Build the base endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - if self.switch_sn is None: - raise ValueError("switch_sn must be set before accessing path") - return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn, "actions") - - -class EpManageFabricSwitchProvisionRMAPost(_EpManageFabricSwitchActionsPerSwitchBase): - """ - # Summary - - Provision RMA for Switch Endpoint - - ## Description - - Endpoint to RMA (Return Material Authorization) an existing switch with a new bootstrapped switch. 
- - ## Path - - - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA - - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA?ticketId=CHG12345 - - ## Verb - - - POST - - ## Query Parameters - - - ticket_id: Change control ticket ID (optional) - - ## Usage - - ```python - # Provision RMA - request = EpManageFabricSwitchProvisionRMAPost() - request.fabric_name = "MyFabric" - request.switch_sn = "SAL1948TRTT" - path = request.path - verb = request.verb - - # Provision RMA with change control ticket - request = EpManageFabricSwitchProvisionRMAPost() - request.fabric_name = "MyFabric" - request.switch_sn = "SAL1948TRTT" - request.endpoint_params.ticket_id = "CHG12345" - path = request.path - verb = request.verb - # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345 - ``` - """ - - class_name: Literal["EpManageFabricSwitchProvisionRMAPost"] = Field( - default="EpManageFabricSwitchProvisionRMAPost", frozen=True, description="Class name for backward compatibility" - ) - endpoint_params: SwitchActionsTicketEndpointParams = Field( - default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" - ) - - @property - def path(self) -> str: - """ - # Summary - - Build the endpoint path with optional query string. 
- - ## Returns - - - Complete endpoint path string, optionally including query parameters - """ - base = f"{self._base_path}/provisionRMA" - query_string = self.endpoint_params.to_query_string() - if query_string: - return f"{base}?{query_string}" - return base - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.POST - - -# ============================================================================ -# Change Switch Serial Number Endpoints -# ============================================================================ - - -class SwitchActionsClusterEndpointParams(ClusterNameMixin, EndpointQueryParams): - """ - # Summary - - Endpoint-specific query parameters for switch action endpoints that accept only a cluster name. - - ## Parameters - - - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) - - ## Usage - - ```python - params = SwitchActionsClusterEndpointParams(cluster_name="cluster1") - query_string = params.to_query_string() - # Returns: "clusterName=cluster1" - ``` - """ - - -class EpManageFabricSwitchChangeSerialNumberPost(_EpManageFabricSwitchActionsPerSwitchBase): - """ - # Summary - - Change Switch Serial Number Endpoint - - ## Description - - Endpoint to change the serial number for a pre-provisioned switch. 
- - ## Path - - - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber - - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber?clusterName=cluster1 - - ## Verb - - - POST - - ## Query Parameters - - - cluster_name: Target cluster name for multi-cluster deployments (optional) - - ## Usage - - ```python - # Change serial number - request = EpManageFabricSwitchChangeSerialNumberPost() - request.fabric_name = "MyFabric" - request.switch_sn = "SAL1948TRTT" - path = request.path - verb = request.verb - - # Change serial number with cluster name - request = EpManageFabricSwitchChangeSerialNumberPost() - request.fabric_name = "MyFabric" - request.switch_sn = "SAL1948TRTT" - request.endpoint_params.cluster_name = "cluster1" - path = request.path - verb = request.verb - # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1 - ``` - """ - - class_name: Literal["EpManageFabricSwitchChangeSerialNumberPost"] = Field( - default="EpManageFabricSwitchChangeSerialNumberPost", frozen=True, description="Class name for backward compatibility" - ) - endpoint_params: SwitchActionsClusterEndpointParams = Field( - default_factory=SwitchActionsClusterEndpointParams, description="Endpoint-specific query parameters" - ) - - @property - def path(self) -> str: - """ - # Summary - - Build the endpoint path with optional query string. 
- - ## Returns - - - Complete endpoint path string, optionally including query parameters - """ - base = f"{self._base_path}/changeSwitchSerialNumber" - query_string = self.endpoint_params.to_query_string() - if query_string: - return f"{base}?{query_string}" - return base - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.POST - - # ============================================================================ # Rediscover Endpoints # ============================================================================ -class EpManageFabricSwitchActionsRediscoverPost(_EpManageFabricSwitchActionsBase): +class EpManageFabricsSwitchActionsRediscoverPost(_EpManageFabricsSwitchActionsBase): """ # Summary @@ -670,13 +472,13 @@ class EpManageFabricSwitchActionsRediscoverPost(_EpManageFabricSwitchActionsBase ```python # Rediscover switches - request = EpManageFabricSwitchActionsRediscoverPost() + request = EpManageFabricsSwitchActionsRediscoverPost() request.fabric_name = "MyFabric" path = request.path verb = request.verb # Rediscover switches with change control ticket - request = EpManageFabricSwitchActionsRediscoverPost() + request = EpManageFabricsSwitchActionsRediscoverPost() request.fabric_name = "MyFabric" request.endpoint_params.ticket_id = "CHG12345" path = request.path @@ -685,8 +487,8 @@ class EpManageFabricSwitchActionsRediscoverPost(_EpManageFabricSwitchActionsBase ``` """ - class_name: Literal["EpManageFabricSwitchActionsRediscoverPost"] = Field( - default="EpManageFabricSwitchActionsRediscoverPost", frozen=True, + class_name: Literal["EpManageFabricsSwitchActionsRediscoverPost"] = Field( + default="EpManageFabricsSwitchActionsRediscoverPost", frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py 
b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py new file mode 100644 index 00000000..485747ec --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Switches endpoint models. + +This module contains endpoint definitions for switch CRUD operations +within fabrics in the ND Manage API. + +Endpoints covered: +- List switches in a fabric +- Add switches to a fabric +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + ClusterNameMixin, + FabricNameMixin, + FilterMixin, + MaxMixin, + OffsetMixin, + SwitchSerialNumberMixin, + TicketIdMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + +class FabricSwitchesGetEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for list fabric switches endpoint. 
+ + ## Parameters + + - hostname: Filter by switch hostname (optional) + - max: Maximum number of results (optional, from `MaxMixin`) + - offset: Pagination offset (optional, from `OffsetMixin`) + - filter: Lucene filter expression (optional, from `FilterMixin`) + + ## Usage + + ```python + params = FabricSwitchesGetEndpointParams(hostname="leaf1", max=100) + query_string = params.to_query_string() + # Returns: "hostname=leaf1&max=100" + ``` + """ + + hostname: Optional[str] = Field(default=None, min_length=1, description="Filter by switch hostname") + + +class FabricSwitchesAddEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for add switches to fabric endpoint. + + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) + + ## Usage + + ```python + params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1&ticketId=CHG12345" + ``` + """ + + +class _EpManageFabricsSwitchesBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for Fabric Switches endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switches endpoint. + """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "switches") + + +class EpManageFabricsSwitchesGet(_EpManageFabricsSwitchesBase): + """ + # Summary + + List Fabric Switches Endpoint + + ## Description + + Endpoint to list all switches in a specific fabric with optional filtering. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches + - /api/v1/manage/fabrics/{fabricName}/switches?hostname=leaf1&max=100 + + ## Verb + + - GET + + ## Query Parameters + + - hostname: Filter by switch hostname (optional) + - max: Maximum number of results (optional) + - offset: Pagination offset (optional) + - filter: Lucene filter expression (optional) + + ## Usage + + ```python + # List all switches + request = EpManageFabricsSwitchesGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # List with filtering + request = EpManageFabricsSwitchesGet() + request.fabric_name = "MyFabric" + request.endpoint_params.hostname = "leaf1" + request.endpoint_params.max = 100 + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches?hostname=leaf1&max=100 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchesGet"] = Field( + default="EpManageFabricsSwitchesGet", frozen=True, description="Class name for backward compatibility" + ) + endpoint_params: FabricSwitchesGetEndpointParams = Field( + default_factory=FabricSwitchesGetEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET + + +class EpManageFabricsSwitchesPost(_EpManageFabricsSwitchesBase): + """ + # Summary + + Add Switches to Fabric Endpoint + + ## Description + + Endpoint to add switches to a specific fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches + - /api/v1/manage/fabrics/{fabricName}/switches?clusterName=cluster1&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Add switches + request = EpManageFabricsSwitchesPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Add switches with cluster and ticket + request = EpManageFabricsSwitchesPost() + request.fabric_name = "MyFabric" + request.endpoint_params.cluster_name = "cluster1" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches?clusterName=cluster1&ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchesPost"] = Field( + default="EpManageFabricsSwitchesPost", frozen=True, description="Class name for backward compatibility" + ) + endpoint_params: FabricSwitchesAddEndpointParams = Field( + default_factory=FabricSwitchesAddEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# Per-Switch Action Endpoints +# ============================================================================ + +class SwitchActionsTicketEndpointParams(TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch action endpoints that accept a ticket ID. + + ## Parameters + + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) + + ## Usage + + ```python + params = SwitchActionsTicketEndpointParams(ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "ticketId=CHG12345" + ``` + """ + + +class SwitchActionsClusterEndpointParams(ClusterNameMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch action endpoints that accept only a cluster name. + + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) + + ## Usage + + ```python + params = SwitchActionsClusterEndpointParams(cluster_name="cluster1") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1" + ``` + """ + +class _EpManageFabricsSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): + """ + Base class for per-switch action endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions endpoint. 
+ """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + if self.switch_sn is None: + raise ValueError("switch_sn must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn, "actions") + + +class EpManageFabricsSwitchProvisionRMAPost(_EpManageFabricsSwitchActionsPerSwitchBase): + """ + # Summary + + Provision RMA for Switch Endpoint + + ## Description + + Endpoint to RMA (Return Material Authorization) an existing switch with a new bootstrapped switch. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Provision RMA + request = EpManageFabricsSwitchProvisionRMAPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + path = request.path + verb = request.verb + + # Provision RMA with change control ticket + request = EpManageFabricsSwitchProvisionRMAPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchProvisionRMAPost"] = Field( + default="EpManageFabricsSwitchProvisionRMAPost", frozen=True, description="Class name for backward compatibility" + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """Build the endpoint path with optional query string.""" + 
base = f"{self._base_path}/provisionRMA" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpManageFabricsSwitchChangeSerialNumberPost(_EpManageFabricsSwitchActionsPerSwitchBase): + """ + # Summary + + Change Switch Serial Number Endpoint + + ## Description + + Endpoint to change the serial number for a pre-provisioned switch. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber?clusterName=cluster1 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + + ## Usage + + ```python + # Change serial number + request = EpManageFabricsSwitchChangeSerialNumberPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + path = request.path + verb = request.verb + + # Change serial number with cluster name + request = EpManageFabricsSwitchChangeSerialNumberPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + request.endpoint_params.cluster_name = "cluster1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchChangeSerialNumberPost"] = Field( + default="EpManageFabricsSwitchChangeSerialNumberPost", frozen=True, description="Class name for backward compatibility" + ) + endpoint_params: SwitchActionsClusterEndpointParams = Field( + default_factory=SwitchActionsClusterEndpointParams, description="Endpoint-specific query parameters" + ) + + @property + def path(self) -> str: + """Build the endpoint path with optional query string.""" + base 
= f"{self._base_path}/changeSwitchSerialNumber" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py b/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py deleted file mode 100644 index a1498cb6..00000000 --- a/plugins/module_utils/endpoints/v1/manage/nd_manage_switches/fabric_switches.py +++ /dev/null @@ -1,256 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -""" -ND Manage Fabric Switches endpoint models. - -This module contains endpoint definitions for switch CRUD operations -within fabrics in the ND Manage API. - -Endpoints covered: -- List switches in a fabric -- Add switches to a fabric -""" - -from __future__ import absolute_import, annotations, division, print_function - -# pylint: disable=invalid-name -__metaclass__ = type -__author__ = "Akshayanat Chengam Saravanan" -# pylint: enable=invalid-name - -from typing import Literal, Optional - -from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( - ClusterNameMixin, - FabricNameMixin, - FilterMixin, - MaxMixin, - OffsetMixin, - TicketIdMixin, -) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( - EndpointQueryParams, -) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( - BasePath, -) -from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - Field, -) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( - 
NDEndpointBaseModel, -) - - -class FabricSwitchesGetEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): - """ - # Summary - - Endpoint-specific query parameters for list fabric switches endpoint. - - ## Parameters - - - hostname: Filter by switch hostname (optional) - - max: Maximum number of results (optional, from `MaxMixin`) - - offset: Pagination offset (optional, from `OffsetMixin`) - - filter: Lucene filter expression (optional, from `FilterMixin`) - - ## Usage - - ```python - params = FabricSwitchesGetEndpointParams(hostname="leaf1", max=100) - query_string = params.to_query_string() - # Returns: "hostname=leaf1&max=100" - ``` - """ - - hostname: Optional[str] = Field(default=None, min_length=1, description="Filter by switch hostname") - - -class FabricSwitchesAddEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): - """ - # Summary - - Endpoint-specific query parameters for add switches to fabric endpoint. - - ## Parameters - - - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) - - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) - - ## Usage - - ```python - params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") - query_string = params.to_query_string() - # Returns: "clusterName=cluster1&ticketId=CHG12345" - ``` - """ - - -class _EpManageFabricSwitchesBase(FabricNameMixin, NDEndpointBaseModel): - """ - Base class for Fabric Switches endpoints. - - Provides common functionality for all HTTP methods on the - /api/v1/manage/fabrics/{fabricName}/switches endpoint. 
- """ - - @property - def _base_path(self) -> str: - """Build the base endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - return BasePath.path("fabrics", self.fabric_name, "switches") - - -class EpManageFabricSwitchesGet(_EpManageFabricSwitchesBase): - """ - # Summary - - List Fabric Switches Endpoint - - ## Description - - Endpoint to list all switches in a specific fabric with optional filtering. - - ## Path - - - /api/v1/manage/fabrics/{fabricName}/switches - - /api/v1/manage/fabrics/{fabricName}/switches?hostname=leaf1&max=100 - - ## Verb - - - GET - - ## Query Parameters - - - hostname: Filter by switch hostname (optional) - - max: Maximum number of results (optional) - - offset: Pagination offset (optional) - - filter: Lucene filter expression (optional) - - ## Usage - - ```python - # List all switches - request = EpManageFabricSwitchesGet() - request.fabric_name = "MyFabric" - path = request.path - verb = request.verb - - # List with filtering - request = EpManageFabricSwitchesGet() - request.fabric_name = "MyFabric" - request.endpoint_params.hostname = "leaf1" - request.endpoint_params.max = 100 - path = request.path - verb = request.verb - # Path will be: /api/v1/manage/fabrics/MyFabric/switches?hostname=leaf1&max=100 - ``` - """ - - class_name: Literal["EpManageFabricSwitchesGet"] = Field( - default="EpManageFabricSwitchesGet", frozen=True, description="Class name for backward compatibility" - ) - endpoint_params: FabricSwitchesGetEndpointParams = Field( - default_factory=FabricSwitchesGetEndpointParams, description="Endpoint-specific query parameters" - ) - - @property - def path(self) -> str: - """ - # Summary - - Build the endpoint path with optional query string. 
- - ## Returns - - - Complete endpoint path string, optionally including query parameters - """ - query_string = self.endpoint_params.to_query_string() - if query_string: - return f"{self._base_path}?{query_string}" - return self._base_path - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.GET - - -class EpManageFabricSwitchesPost(_EpManageFabricSwitchesBase): - """ - # Summary - - Add Switches to Fabric Endpoint - - ## Description - - Endpoint to add switches to a specific fabric. - - ## Path - - - /api/v1/manage/fabrics/{fabricName}/switches - - /api/v1/manage/fabrics/{fabricName}/switches?clusterName=cluster1&ticketId=CHG12345 - - ## Verb - - - POST - - ## Query Parameters - - - cluster_name: Target cluster name for multi-cluster deployments (optional) - - ticket_id: Change control ticket ID (optional) - - ## Usage - - ```python - # Add switches - request = EpManageFabricSwitchesPost() - request.fabric_name = "MyFabric" - path = request.path - verb = request.verb - - # Add switches with cluster and ticket - request = EpManageFabricSwitchesPost() - request.fabric_name = "MyFabric" - request.endpoint_params.cluster_name = "cluster1" - request.endpoint_params.ticket_id = "CHG12345" - path = request.path - verb = request.verb - # Path will be: /api/v1/manage/fabrics/MyFabric/switches?clusterName=cluster1&ticketId=CHG12345 - ``` - """ - - class_name: Literal["EpManageFabricSwitchesPost"] = Field( - default="EpManageFabricSwitchesPost", frozen=True, description="Class name for backward compatibility" - ) - endpoint_params: FabricSwitchesAddEndpointParams = Field( - default_factory=FabricSwitchesAddEndpointParams, description="Endpoint-specific query parameters" - ) - - @property - def path(self) -> str: - """ - # Summary - - Build the endpoint path with optional query string. 
- - ## Returns - - - Complete endpoint path string, optionally including query parameters - """ - query_string = self.endpoint_params.to_query_string() - if query_string: - return f"{self._base_path}?{query_string}" - return self._base_path - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.POST diff --git a/plugins/module_utils/models/manage_switches/__init__.py b/plugins/module_utils/models/manage_switches/__init__.py new file mode 100644 index 00000000..38e667a8 --- /dev/null +++ b/plugins/module_utils/models/manage_switches/__init__.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""nd_manage_switches models package. + +Re-exports all model classes, enums, and validators from their individual +modules so that consumers can import directly from the package: + + from .models.nd_manage_switches import SwitchConfigModel, SwitchRole, ... 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +# --- Enums --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( # noqa: F401 + AdvisoryLevel, + AnomalyLevel, + ConfigSyncStatus, + DiscoveryStatus, + PlatformType, + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, + SystemMode, + VpcRole, +) + +# --- Validators --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators # noqa: F401 + +# --- Nested / shared models --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( # noqa: F401 + AdditionalAciSwitchData, + AdditionalSwitchData, + Metadata, + SwitchMetadata, + TelemetryIpCollection, + VpcData, +) + +# --- Discovery models --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.discovery_models import ( # noqa: F401 + AddSwitchesRequestModel, + ShallowDiscoveryRequestModel, + SwitchDiscoveryModel, +) + +# --- Switch data models --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( # noqa: F401 + SwitchDataModel, +) + +# --- Bootstrap models --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.bootstrap_models import ( # noqa: F401 + BootstrapBaseData, + BootstrapBaseModel, + BootstrapCredentialModel, + BootstrapImportSpecificModel, + BootstrapImportSwitchModel, + ImportBootstrapSwitchesRequestModel, +) + +# --- Preprovision models --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.preprovision_models import ( # noqa: F401 + PreProvisionSwitchesRequestModel, + PreProvisionSwitchModel, +) + +# --- RMA models --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.rma_models import ( # noqa: F401 + RMASwitchModel, +) + +# --- Switch actions models --- +from 
ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_actions_models import ( # noqa: F401 + ChangeSwitchSerialNumberRequestModel, + SwitchCredentialsRequestModel, +) + +# --- Config / playbook models --- +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( # noqa: F401 + ConfigDataModel, + POAPConfigModel, + RMAConfigModel, + SwitchConfigModel, +) + + +__all__ = [ + # Enums + "AdvisoryLevel", + "AnomalyLevel", + "ConfigSyncStatus", + "DiscoveryStatus", + "PlatformType", + "RemoteCredentialStore", + "SnmpV3AuthProtocol", + "SwitchRole", + "SystemMode", + "VpcRole", + # Validators + "SwitchValidators", + # Nested models + "AdditionalAciSwitchData", + "AdditionalSwitchData", + "Metadata", + "SwitchMetadata", + "TelemetryIpCollection", + "VpcData", + # Discovery models + "AddSwitchesRequestModel", + "ShallowDiscoveryRequestModel", + "SwitchDiscoveryModel", + # Switch data models + "SwitchDataModel", + # Bootstrap models + "BootstrapBaseData", + "BootstrapBaseModel", + "BootstrapCredentialModel", + "BootstrapImportSpecificModel", + "BootstrapImportSwitchModel", + "ImportBootstrapSwitchesRequestModel", + # Preprovision models + "PreProvisionSwitchesRequestModel", + "PreProvisionSwitchModel", + # RMA models + "RMASwitchModel", + # Switch actions models + "ChangeSwitchSerialNumberRequestModel", + "SwitchCredentialsRequestModel", + # Config models + "ConfigDataModel", + "POAPConfigModel", + "RMAConfigModel", + "SwitchConfigModel", +] diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index 0d72ebed..224d7fa9 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # 
GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index b596ca6f..94336143 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py index dfe190f0..1475edf8 100644 --- a/plugins/module_utils/models/manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/enums.py b/plugins/module_utils/models/manage_switches/enums.py index b88216ad..0d3f85cc 100644 --- a/plugins/module_utils/models/manage_switches/enums.py +++ b/plugins/module_utils/models/manage_switches/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py index ba073824..4425e486 100644 --- a/plugins/module_utils/models/manage_switches/preprovision_models.py +++ 
b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py index 7760d11b..7585d222 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py index 5f903e65..8c1d7bb6 100644 --- a/plugins/module_utils/models/manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index e4de26cb..9be8b22d 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/models/manage_switches/validators.py b/plugins/module_utils/models/manage_switches/validators.py index e3ceb3a6..b2e3a704 100644 --- a/plugins/module_utils/models/manage_switches/validators.py +++ b/plugins/module_utils/models/manage_switches/validators.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 638e870d..5fdb2c47 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -48,7 +48,7 @@ POAPConfigModel, RMAConfigModel, ) -from ansible_collections.cisco.nd.plugins.module_utils.utils.nd_manage_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.utils.manage_switches import ( FabricUtils, SwitchWaitUtils, SwitchOperationError, @@ -59,22 +59,22 @@ build_bootstrap_index, build_poap_data_block, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switches import ( - EpManageFabricSwitchesGet, - EpManageFabricSwitchesPost, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switches import ( + EpManageFabricsSwitchesGet, + EpManageFabricsSwitchesPost, + EpManageFabricsSwitchProvisionRMAPost, + EpManageFabricsSwitchChangeSerialNumberPost, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_discovery import ( - 
EpManageFabricShallowDiscoveryPost, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( + EpManageFabricsActionsShallowDiscoveryPost, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switch_actions import ( - EpManageFabricSwitchProvisionRMAPost, - EpManageFabricSwitchActionsImportBootstrapPost, - EpManageFabricSwitchActionsPreProvisionPost, - EpManageFabricSwitchActionsRemovePost, - EpManageFabricSwitchActionsChangeRolesPost, - EpManageFabricSwitchChangeSerialNumberPost, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( + EpManageFabricsSwitchActionsImportBootstrapPost, + EpManageFabricsSwitchActionsPreProvisionPost, + EpManageFabricsSwitchActionsRemovePost, + EpManageFabricsSwitchActionsChangeRolesPost, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.credentials import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_credentials_switches import ( EpManageCredentialsSwitchesPost, ) @@ -562,7 +562,7 @@ def bulk_discover( log.debug("ENTER: bulk_discover()") log.debug(f"Discovering {len(switches)} switches in bulk") - endpoint = EpManageFabricShallowDiscoveryPost() + endpoint = EpManageFabricsActionsShallowDiscoveryPost() endpoint.fabric_name = self.ctx.fabric seed_ips = [switch.seed_ip for switch in switches] @@ -775,7 +775,7 @@ def bulk_add( log.debug("ENTER: bulk_add()") log.debug(f"Adding {len(switches)} switches to fabric") - endpoint = EpManageFabricSwitchesPost() + endpoint = EpManageFabricsSwitchesPost() endpoint.fabric_name = self.ctx.fabric switch_discoveries = [] @@ -903,7 +903,7 @@ def bulk_delete( log.debug("EXIT: bulk_delete() - nothing to delete") return [] - endpoint = EpManageFabricSwitchActionsRemovePost() + endpoint = EpManageFabricsSwitchActionsRemovePost() endpoint.fabric_name = 
self.ctx.fabric payload = {"switchIds": serial_numbers} @@ -1041,7 +1041,7 @@ def bulk_update_roles( log.debug("EXIT: bulk_update_roles() - no roles to update") return - endpoint = EpManageFabricSwitchActionsChangeRolesPost() + endpoint = EpManageFabricsSwitchActionsChangeRolesPost() endpoint.fabric_name = self.ctx.fabric payload = {"switchRoles": switch_roles} @@ -1542,7 +1542,7 @@ def _import_bootstrap_switches( log.debug("ENTER: _import_bootstrap_switches()") - endpoint = EpManageFabricSwitchActionsImportBootstrapPost() + endpoint = EpManageFabricsSwitchActionsImportBootstrapPost() endpoint.fabric_name = self.ctx.fabric request_model = ImportBootstrapSwitchesRequestModel(switches=models) @@ -1663,7 +1663,7 @@ def _preprovision_switches( log.debug("ENTER: _preprovision_switches()") - endpoint = EpManageFabricSwitchActionsPreProvisionPost() + endpoint = EpManageFabricsSwitchActionsPreProvisionPost() endpoint.fabric_name = self.ctx.fabric request_model = PreProvisionSwitchesRequestModel(switches=models) @@ -1797,7 +1797,7 @@ def _handle_poap_swap( f"{old_serial} → {new_serial}" ) - endpoint = EpManageFabricSwitchChangeSerialNumberPost() + endpoint = EpManageFabricsSwitchChangeSerialNumberPost() endpoint.fabric_name = fabric endpoint.switch_sn = old_serial @@ -2295,7 +2295,7 @@ def _provision_rma_switch( log.debug("ENTER: _provision_rma_switch()") - endpoint = EpManageFabricSwitchProvisionRMAPost() + endpoint = EpManageFabricsSwitchProvisionRMAPost() endpoint.fabric_name = self.ctx.fabric endpoint.switch_sn = old_switch_id @@ -2976,7 +2976,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: Returns: List of raw switch dictionaries returned by the controller. 
""" - endpoint = EpManageFabricSwitchesGet() + endpoint = EpManageFabricsSwitchesGet() endpoint.fabric_name = self.fabric self.log.debug(f"Querying all switches with endpoint: {endpoint.path}") self.log.debug(f"Query verb: {endpoint.verb}") diff --git a/plugins/module_utils/utils/nd_manage_switches/__init__.py b/plugins/module_utils/utils/manage_switches/__init__.py similarity index 71% rename from plugins/module_utils/utils/nd_manage_switches/__init__.py rename to plugins/module_utils/utils/manage_switches/__init__.py index ff3d215b..bb142fe1 100644 --- a/plugins/module_utils/utils/nd_manage_switches/__init__.py +++ b/plugins/module_utils/utils/manage_switches/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Akshayant Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -9,19 +9,13 @@ Re-exports all utility classes, functions, and exceptions so that consumers can import directly from the package: - from .utils.nd_manage_switches import ( - SwitchOperationError, PayloadUtils, FabricUtils, SwitchWaitUtils, - mask_password, get_switch_field, determine_operation_type, - group_switches_by_credentials, query_bootstrap_switches, - build_bootstrap_index, build_poap_data_block, - ) """ from __future__ import absolute_import, division, print_function __metaclass__ = type -from .exceptions import SwitchOperationError # noqa: F401 +from ansible_collections.cisco.nd.plugins.module_utils.utils.manage_switches.exceptions import SwitchOperationError # noqa: F401 from .payload_utils import PayloadUtils, mask_password # noqa: F401 from .fabric_utils import FabricUtils # noqa: F401 from .switch_wait_utils import SwitchWaitUtils # noqa: F401 diff --git a/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py b/plugins/module_utils/utils/manage_switches/bootstrap_utils.py similarity index 92% rename from 
plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py rename to plugins/module_utils/utils/manage_switches/bootstrap_utils.py index b3e58c57..d78d2531 100644 --- a/plugins/module_utils/utils/nd_manage_switches/bootstrap_utils.py +++ b/plugins/module_utils/utils/manage_switches/bootstrap_utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or # https://www.gnu.org/licenses/gpl-3.0.txt) @@ -14,8 +14,8 @@ import logging from typing import Any, Dict, List, Optional -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_bootstrap import ( - EpManageFabricBootstrapGet, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_bootstrap import ( + EpManageFabricsBootstrapGet, ) @@ -36,7 +36,7 @@ def query_bootstrap_switches( """ log.debug("ENTER: query_bootstrap_switches()") - endpoint = EpManageFabricBootstrapGet() + endpoint = EpManageFabricsBootstrapGet() endpoint.fabric_name = fabric log.debug(f"Bootstrap endpoint: {endpoint.path}") diff --git a/plugins/module_utils/utils/nd_manage_switches/exceptions.py b/plugins/module_utils/utils/manage_switches/exceptions.py similarity index 82% rename from plugins/module_utils/utils/nd_manage_switches/exceptions.py rename to plugins/module_utils/utils/manage_switches/exceptions.py index 09d7ebb5..8e5b0055 100644 --- a/plugins/module_utils/utils/nd_manage_switches/exceptions.py +++ b/plugins/module_utils/utils/manage_switches/exceptions.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py 
b/plugins/module_utils/utils/manage_switches/fabric_utils.py similarity index 94% rename from plugins/module_utils/utils/nd_manage_switches/fabric_utils.py rename to plugins/module_utils/utils/manage_switches/fabric_utils.py index 244f2b46..ab4557da 100644 --- a/plugins/module_utils/utils/nd_manage_switches/fabric_utils.py +++ b/plugins/module_utils/utils/manage_switches/fabric_utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -14,11 +14,13 @@ import time from typing import Any, Dict, Optional -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_config import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( EpManageFabricConfigDeployPost, - EpManageFabricConfigSavePost, EpManageFabricGet, ) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( + EpManageFabricsActionsConfigSavePost, +) from .exceptions import SwitchOperationError @@ -44,7 +46,7 @@ def __init__( self.log = logger or logging.getLogger("nd.FabricUtils") # Pre-configure endpoints - self.ep_config_save = EpManageFabricConfigSavePost() + self.ep_config_save = EpManageFabricsActionsConfigSavePost() self.ep_config_save.fabric_name = fabric self.ep_config_deploy = EpManageFabricConfigDeployPost() diff --git a/plugins/module_utils/utils/nd_manage_switches/payload_utils.py b/plugins/module_utils/utils/manage_switches/payload_utils.py similarity index 96% rename from plugins/module_utils/utils/nd_manage_switches/payload_utils.py rename to plugins/module_utils/utils/manage_switches/payload_utils.py index effadfb8..84e99b99 100644 --- a/plugins/module_utils/utils/nd_manage_switches/payload_utils.py +++ 
b/plugins/module_utils/utils/manage_switches/payload_utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/utils/nd_manage_switches/switch_helpers.py b/plugins/module_utils/utils/manage_switches/switch_helpers.py similarity index 97% rename from plugins/module_utils/utils/nd_manage_switches/switch_helpers.py rename to plugins/module_utils/utils/manage_switches/switch_helpers.py index bffb2bdb..55f71ba9 100644 --- a/plugins/module_utils/utils/nd_manage_switches/switch_helpers.py +++ b/plugins/module_utils/utils/manage_switches/switch_helpers.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or # https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py b/plugins/module_utils/utils/manage_switches/switch_wait_utils.py similarity index 97% rename from plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py rename to plugins/module_utils/utils/manage_switches/switch_wait_utils.py index dda4c712..2d6e281d 100644 --- a/plugins/module_utils/utils/nd_manage_switches/switch_wait_utils.py +++ b/plugins/module_utils/utils/manage_switches/switch_wait_utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -14,14 +14,14 @@ import time from typing import Any, Dict, List, Optional -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_config import ( - 
EpManageFabricInventoryDiscoverGet, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_inventory import ( + EpManageFabricsInventoryDiscoverGet, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switches import ( - EpManageFabricSwitchesGet, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switches import ( + EpManageFabricsSwitchesGet, ) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.nd_manage_switches.fabric_switch_actions import ( - EpManageFabricSwitchActionsRediscoverPost, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( + EpManageFabricsSwitchActionsRediscoverPost, ) from .fabric_utils import FabricUtils @@ -94,13 +94,13 @@ def __init__( ) # Pre-configure endpoints - self.ep_switches_get = EpManageFabricSwitchesGet() + self.ep_switches_get = EpManageFabricsSwitchesGet() self.ep_switches_get.fabric_name = fabric - self.ep_inventory_discover = EpManageFabricInventoryDiscoverGet() + self.ep_inventory_discover = EpManageFabricsInventoryDiscoverGet() self.ep_inventory_discover.fabric_name = fabric - self.ep_rediscover = EpManageFabricSwitchActionsRediscoverPost() + self.ep_rediscover = EpManageFabricsSwitchActionsRediscoverPost() self.ep_rediscover.fabric_name = fabric # Cached greenfield flag diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index df0a53d7..9e1ea604 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -1,21 +1,21 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright: (c) 2026, Akshayanat Chengam Saravanan (@achengam) +# Copyright: (c) 2026, Akshayanat C S (@achengam) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function 
__metaclass__ = type __copyright__ = "Copyright (c) 2026 Cisco and/or its affiliates." -__author__ = "Akshayanat Chengam Saravanan" +__author__ = "Akshayanat C S" DOCUMENTATION = """ --- module: nd_manage_switches short_description: Manage switches in Cisco Nexus Dashboard (ND). version_added: "1.0.0" -author: Akshayanat Chengam Saravanan (@achengam) +author: Akshayanat C S (@achengam) description: - Add, delete, and override switches in Cisco Nexus Dashboard. - Supports normal discovery, POAP (bootstrap/preprovision), and RMA operations. diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_credentials_switches.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_credentials_switches.py new file mode 100644 index 00000000..a3a088b2 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_credentials_switches.py @@ -0,0 +1,177 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_credentials_switches.py + +Tests the ND Manage Credentials Switches endpoint classes. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_credentials_switches import ( + CredentialsSwitchesEndpointParams, + EpManageCredentialsSwitchesPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: CredentialsSwitchesEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_credentials_switches_00010(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams default values + + ## Test + + - ticket_id defaults to None + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.__init__() + """ + with does_not_raise(): + params = CredentialsSwitchesEndpointParams() + assert params.ticket_id is None + + +def test_endpoints_api_v1_manage_credentials_switches_00020(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams ticket_id can be set + + ## Test + + - ticket_id can be set to a string value + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.__init__() + """ + with does_not_raise(): + params = CredentialsSwitchesEndpointParams(ticket_id="CHG12345") + assert params.ticket_id == "CHG12345" + + +def test_endpoints_api_v1_manage_credentials_switches_00030(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams generates correct query string + + ## Test + + - to_query_string() returns ticketId=CHG12345 when ticket_id is set + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.to_query_string() + """ + with does_not_raise(): + params = 
CredentialsSwitchesEndpointParams(ticket_id="CHG12345") + result = params.to_query_string() + assert result == "ticketId=CHG12345" + + +def test_endpoints_api_v1_manage_credentials_switches_00040(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams returns empty query string when no params set + + ## Test + + - to_query_string() returns empty string when ticket_id is not set + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.to_query_string() + """ + with does_not_raise(): + params = CredentialsSwitchesEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: EpManageCredentialsSwitchesPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_credentials_switches_00100(): + """ + # Summary + + Verify EpManageCredentialsSwitchesPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageCredentialsSwitchesPost.__init__() + - EpManageCredentialsSwitchesPost.class_name + - EpManageCredentialsSwitchesPost.verb + """ + with does_not_raise(): + instance = EpManageCredentialsSwitchesPost() + assert instance.class_name == "EpManageCredentialsSwitchesPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_credentials_switches_00110(): + """ + # Summary + + Verify EpManageCredentialsSwitchesPost path without query params + + ## Test + + - path returns the correct base endpoint path + + ## Classes and Methods + + - EpManageCredentialsSwitchesPost.path + """ + with does_not_raise(): + instance = EpManageCredentialsSwitchesPost() + result = instance.path + assert result == "/api/v1/manage/credentials/switches" + + +def test_endpoints_api_v1_manage_credentials_switches_00120(): + """ + # Summary + + Verify EpManageCredentialsSwitchesPost path with 
ticket_id + + ## Test + + - path includes ticketId in query string when set + + ## Classes and Methods + + - EpManageCredentialsSwitchesPost.path + """ + with does_not_raise(): + instance = EpManageCredentialsSwitchesPost() + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/credentials/switches?ticketId=CHG12345" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py new file mode 100644 index 00000000..b0ed3f95 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py @@ -0,0 +1,271 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics.py + +Tests the ND Manage Fabrics endpoint classes. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricConfigDeployPost, + EpManageFabricGet, + FabricConfigDeployEndpointParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: FabricConfigDeployEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00010(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams default values + + ## Test + + - force_show_run defaults to None + - incl_all_msd_switches defaults to None + + ## Classes and Methods + + - 
FabricConfigDeployEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams() + assert params.force_show_run is None + assert params.incl_all_msd_switches is None + + +def test_endpoints_api_v1_manage_fabrics_00020(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams force_show_run can be set + + ## Test + + - force_show_run can be set to True + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams(force_show_run=True) + assert params.force_show_run is True + + +def test_endpoints_api_v1_manage_fabrics_00030(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams generates query string with both params + + ## Test + + - to_query_string() includes forceShowRun and inclAllMsdSwitches when both are set + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams(force_show_run=True, incl_all_msd_switches=True) + result = params.to_query_string() + assert "forceShowRun=true" in result + assert "inclAllMsdSwitches=true" in result + + +def test_endpoints_api_v1_manage_fabrics_00040(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams returns empty query string when no params set + + ## Test + + - to_query_string() returns empty string when no params set + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: EpManageFabricConfigDeployPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00100(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost basic instantiation + + 
## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.__init__() + - EpManageFabricConfigDeployPost.class_name + - EpManageFabricConfigDeployPost.verb + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + assert instance.class_name == "EpManageFabricConfigDeployPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_00110(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + instance = EpManageFabricConfigDeployPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_00120(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy" + + +def test_endpoints_api_v1_manage_fabrics_00130(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost path with force_show_run + + ## Test + + - path includes forceShowRun in query string when set to True + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.force_show_run = True + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" + + +# ============================================================================= +# Test: EpManageFabricGet 
+# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00200(): + """ + # Summary + + Verify EpManageFabricGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricGet.__init__() + - EpManageFabricGet.class_name + - EpManageFabricGet.verb + """ + with does_not_raise(): + instance = EpManageFabricGet() + assert instance.class_name == "EpManageFabricGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_00210(): + """ + # Summary + + Verify EpManageFabricGet raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricGet.path + """ + instance = EpManageFabricGet() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_00220(): + """ + # Summary + + Verify EpManageFabricGet path + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricGet.path + """ + with does_not_raise(): + instance = EpManageFabricGet() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py new file mode 100644 index 00000000..263b9f0c --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py @@ -0,0 +1,162 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_actions.py + +Tests the ND Manage Fabrics Actions endpoint classes. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( + EpManageFabricsActionsConfigSavePost, + EpManageFabricsActionsShallowDiscoveryPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpManageFabricsActionsShallowDiscoveryPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_actions_00100(): + """ + # Summary + + Verify EpManageFabricsActionsShallowDiscoveryPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsActionsShallowDiscoveryPost.__init__() + - EpManageFabricsActionsShallowDiscoveryPost.class_name + - EpManageFabricsActionsShallowDiscoveryPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsActionsShallowDiscoveryPost() + assert instance.class_name == "EpManageFabricsActionsShallowDiscoveryPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_actions_00110(): + """ + # Summary + + Verify EpManageFabricsActionsShallowDiscoveryPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsActionsShallowDiscoveryPost.path + """ + instance = EpManageFabricsActionsShallowDiscoveryPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_actions_00120(): + """ + # Summary + + Verify 
EpManageFabricsActionsShallowDiscoveryPost path + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsActionsShallowDiscoveryPost.path + """ + with does_not_raise(): + instance = EpManageFabricsActionsShallowDiscoveryPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/shallowDiscovery" + + +# ============================================================================= +# Test: EpManageFabricsActionsConfigSavePost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_actions_00200(): + """ + # Summary + + Verify EpManageFabricsActionsConfigSavePost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsActionsConfigSavePost.__init__() + - EpManageFabricsActionsConfigSavePost.class_name + - EpManageFabricsActionsConfigSavePost.verb + """ + with does_not_raise(): + instance = EpManageFabricsActionsConfigSavePost() + assert instance.class_name == "EpManageFabricsActionsConfigSavePost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_actions_00210(): + """ + # Summary + + Verify EpManageFabricsActionsConfigSavePost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsActionsConfigSavePost.path + """ + instance = EpManageFabricsActionsConfigSavePost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_actions_00220(): + """ + # Summary + + Verify EpManageFabricsActionsConfigSavePost path + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsActionsConfigSavePost.path + """ + with does_not_raise(): + instance = 
EpManageFabricsActionsConfigSavePost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configSave" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py new file mode 100644 index 00000000..bf5f6c68 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py @@ -0,0 +1,206 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_bootstrap.py + +Tests the ND Manage Fabrics Bootstrap endpoint classes. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_bootstrap import ( + EpManageFabricsBootstrapGet, + FabricsBootstrapEndpointParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: FabricsBootstrapEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00010(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams default values + + ## Test + + - max defaults to None + - offset defaults to None + - filter defaults to None + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricsBootstrapEndpointParams() + assert params.max is None + assert params.offset is None + assert 
params.filter is None + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00020(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams max can be set + + ## Test + + - max can be set to an integer value + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricsBootstrapEndpointParams(max=50) + assert params.max == 50 + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00030(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams generates query string with pagination + + ## Test + + - to_query_string() returns correct format with max and offset + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricsBootstrapEndpointParams(max=50, offset=0) + result = params.to_query_string() + assert "max=50" in result + assert "offset=0" in result + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00040(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams returns empty query string when no params set + + ## Test + + - to_query_string() returns empty string when no params set + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricsBootstrapEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: EpManageFabricsBootstrapGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00100(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsBootstrapGet.__init__() + - EpManageFabricsBootstrapGet.class_name + - EpManageFabricsBootstrapGet.verb + """ + with does_not_raise(): + instance = 
EpManageFabricsBootstrapGet() + assert instance.class_name == "EpManageFabricsBootstrapGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00110(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsBootstrapGet.path + """ + instance = EpManageFabricsBootstrapGet() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00120(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsBootstrapGet.path + """ + with does_not_raise(): + instance = EpManageFabricsBootstrapGet() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/bootstrap" + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00130(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet path with pagination params + + ## Test + + - path includes max and offset in query string when set + + ## Classes and Methods + + - EpManageFabricsBootstrapGet.path + """ + with does_not_raise(): + instance = EpManageFabricsBootstrapGet() + instance.fabric_name = "MyFabric" + instance.endpoint_params.max = 50 + instance.endpoint_params.offset = 0 + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/bootstrap?") + assert "max=50" in result + assert "offset=0" in result diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py new file mode 100644 index 00000000..d53488ea --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py @@ -0,0 +1,92 @@ +# 
Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_inventory.py + +Tests the ND Manage Fabrics Inventory endpoint classes. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_inventory import ( + EpManageFabricsInventoryDiscoverGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpManageFabricsInventoryDiscoverGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_inventory_00010(): + """ + # Summary + + Verify EpManageFabricsInventoryDiscoverGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsInventoryDiscoverGet.__init__() + - EpManageFabricsInventoryDiscoverGet.class_name + - EpManageFabricsInventoryDiscoverGet.verb + """ + with does_not_raise(): + instance = EpManageFabricsInventoryDiscoverGet() + assert instance.class_name == "EpManageFabricsInventoryDiscoverGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_inventory_00020(): + """ + # Summary + + Verify EpManageFabricsInventoryDiscoverGet raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsInventoryDiscoverGet.path + """ + instance = EpManageFabricsInventoryDiscoverGet() + with 
pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_inventory_00030(): + """ + # Summary + + Verify EpManageFabricsInventoryDiscoverGet path + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsInventoryDiscoverGet.path + """ + with does_not_raise(): + instance = EpManageFabricsInventoryDiscoverGet() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/inventory/discover" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py new file mode 100644 index 00000000..0ce1af96 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py @@ -0,0 +1,491 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_switchactions.py + +Tests the ND Manage Fabrics Switch Actions endpoint classes. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( + EpManageFabricsSwitchActionsChangeRolesPost, + EpManageFabricsSwitchActionsImportBootstrapPost, + EpManageFabricsSwitchActionsPreProvisionPost, + EpManageFabricsSwitchActionsRediscoverPost, + EpManageFabricsSwitchActionsRemovePost, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsRemovePost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00100(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.__init__() + - EpManageFabricsSwitchActionsRemovePost.class_name + - EpManageFabricsSwitchActionsRemovePost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRemovePost() + assert instance.class_name == "EpManageFabricsSwitchActionsRemovePost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00110(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.path + """ + instance = EpManageFabricsSwitchActionsRemovePost() + with 
pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00120(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRemovePost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/remove" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00130(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost path with force and ticket_id + + ## Test + + - path includes force and ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRemovePost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.force = True + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/remove?") + assert "force=true" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsChangeRolesPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00200(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.__init__() + - EpManageFabricsSwitchActionsChangeRolesPost.class_name + - EpManageFabricsSwitchActionsChangeRolesPost.verb + """ + with does_not_raise(): + instance = 
EpManageFabricsSwitchActionsChangeRolesPost() + assert instance.class_name == "EpManageFabricsSwitchActionsChangeRolesPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00210(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.path + """ + instance = EpManageFabricsSwitchActionsChangeRolesPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00220(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsChangeRolesPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00230(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost path with ticket_id + + ## Test + + - path includes ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsChangeRolesPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345" + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsImportBootstrapPost +# ============================================================================= + 
+ +def test_endpoints_api_v1_manage_fabrics_switchactions_00300(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.__init__() + - EpManageFabricsSwitchActionsImportBootstrapPost.class_name + - EpManageFabricsSwitchActionsImportBootstrapPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsImportBootstrapPost() + assert instance.class_name == "EpManageFabricsSwitchActionsImportBootstrapPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00310(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.path + """ + instance = EpManageFabricsSwitchActionsImportBootstrapPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00320(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsImportBootstrapPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00330(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost path with cluster_name and ticket_id + + ## Test + + - path includes clusterName and ticketId in query string when set + + ## Classes and 
Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsImportBootstrapPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?") + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsPreProvisionPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00400(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsPreProvisionPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.__init__() + - EpManageFabricsSwitchActionsPreProvisionPost.class_name + - EpManageFabricsSwitchActionsPreProvisionPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsPreProvisionPost() + assert instance.class_name == "EpManageFabricsSwitchActionsPreProvisionPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00410(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsPreProvisionPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.path + """ + instance = EpManageFabricsSwitchActionsPreProvisionPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00420(): + """ + # Summary + + Verify 
EpManageFabricsSwitchActionsPreProvisionPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsPreProvisionPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/preProvision" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00430(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsPreProvisionPost path with cluster_name and ticket_id + + ## Test + + - path includes clusterName and ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsPreProvisionPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/preProvision?") + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsRediscoverPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00700(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.__init__() + - EpManageFabricsSwitchActionsRediscoverPost.class_name + - EpManageFabricsSwitchActionsRediscoverPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRediscoverPost() + assert instance.class_name == 
"EpManageFabricsSwitchActionsRediscoverPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00710(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.path + """ + instance = EpManageFabricsSwitchActionsRediscoverPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00720(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRediscoverPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/rediscover" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00730(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost path with ticket_id + + ## Test + + - path includes ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRediscoverPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py new file mode 100644 index 00000000..a5d7217f --- /dev/null +++ 
b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py @@ -0,0 +1,614 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_switches.py + +Tests the ND Manage Fabrics Switches endpoint classes. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switches import ( + EpManageFabricsSwitchesGet, + EpManageFabricsSwitchesPost, + EpManageFabricsSwitchChangeSerialNumberPost, + EpManageFabricsSwitchProvisionRMAPost, + FabricSwitchesAddEndpointParams, + FabricSwitchesGetEndpointParams, + SwitchActionsClusterEndpointParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: FabricSwitchesGetEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00010(): + """ + # Summary + + Verify FabricSwitchesGetEndpointParams default values + + ## Test + + - hostname defaults to None + - max defaults to None + - offset defaults to None + + ## Classes and Methods + + - FabricSwitchesGetEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricSwitchesGetEndpointParams() + assert params.hostname is None + assert params.max is None + assert params.offset is None + + +def test_endpoints_api_v1_manage_fabrics_switches_00020(): + """ + # Summary + + Verify FabricSwitchesGetEndpointParams hostname can be set + + ## Test + + - hostname can be set to a string value + + ## 
Classes and Methods + + - FabricSwitchesGetEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricSwitchesGetEndpointParams(hostname="leaf1") + assert params.hostname == "leaf1" + + +def test_endpoints_api_v1_manage_fabrics_switches_00030(): + """ + # Summary + + Verify FabricSwitchesGetEndpointParams generates query string with hostname and max + + ## Test + + - to_query_string() includes hostname and max when both are set + + ## Classes and Methods + + - FabricSwitchesGetEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricSwitchesGetEndpointParams(hostname="leaf1", max=100) + result = params.to_query_string() + assert "hostname=leaf1" in result + assert "max=100" in result + + +def test_endpoints_api_v1_manage_fabrics_switches_00040(): + """ + # Summary + + Verify FabricSwitchesAddEndpointParams default values + + ## Test + + - cluster_name defaults to None + - ticket_id defaults to None + + ## Classes and Methods + + - FabricSwitchesAddEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricSwitchesAddEndpointParams() + assert params.cluster_name is None + assert params.ticket_id is None + + +def test_endpoints_api_v1_manage_fabrics_switches_00050(): + """ + # Summary + + Verify FabricSwitchesAddEndpointParams generates query string with both params + + ## Test + + - to_query_string() includes clusterName and ticketId when both are set + + ## Classes and Methods + + - FabricSwitchesAddEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + result = params.to_query_string() + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchesGet +# ============================================================================= + + +def 
test_endpoints_api_v1_manage_fabrics_switches_00100(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.__init__() + - EpManageFabricsSwitchesGet.class_name + - EpManageFabricsSwitchesGet.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesGet() + assert instance.class_name == "EpManageFabricsSwitchesGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_switches_00110(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.path + """ + instance = EpManageFabricsSwitchesGet() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00120(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesGet() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches" + + +def test_endpoints_api_v1_manage_fabrics_switches_00130(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet path with hostname filter + + ## Test + + - path includes hostname in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesGet() + instance.fabric_name = "MyFabric" + instance.endpoint_params.hostname = "leaf1" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches?hostname=leaf1" + + +# 
============================================================================= +# Test: EpManageFabricsSwitchesPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00200(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchesPost.__init__() + - EpManageFabricsSwitchesPost.class_name + - EpManageFabricsSwitchesPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesPost() + assert instance.class_name == "EpManageFabricsSwitchesPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switches_00210(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchesPost.path + """ + instance = EpManageFabricsSwitchesPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00220(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchesPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches" + + +def test_endpoints_api_v1_manage_fabrics_switches_00230(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost path with cluster_name and ticket_id + + ## Test + + - path includes clusterName and ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchesPost.path + """ + with does_not_raise(): + instance = 
EpManageFabricsSwitchesPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switches?") + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: SwitchActionsClusterEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00300(): + """ + # Summary + + Verify SwitchActionsClusterEndpointParams basic instantiation + + ## Test + + - Instance can be created with defaults + - cluster_name defaults to None + + ## Classes and Methods + + - SwitchActionsClusterEndpointParams.__init__() + """ + with does_not_raise(): + instance = SwitchActionsClusterEndpointParams() + assert instance.cluster_name is None + + +def test_endpoints_api_v1_manage_fabrics_switches_00310(): + """ + # Summary + + Verify SwitchActionsClusterEndpointParams to_query_string returns empty when no params set + + ## Test + + - to_query_string() returns empty string when cluster_name is None + + ## Classes and Methods + + - SwitchActionsClusterEndpointParams.to_query_string() + """ + instance = SwitchActionsClusterEndpointParams() + assert instance.to_query_string() == "" + + +def test_endpoints_api_v1_manage_fabrics_switches_00320(): + """ + # Summary + + Verify SwitchActionsClusterEndpointParams to_query_string with cluster_name + + ## Test + + - to_query_string() returns "clusterName=cluster1" when cluster_name is set + + ## Classes and Methods + + - SwitchActionsClusterEndpointParams.to_query_string() + """ + instance = SwitchActionsClusterEndpointParams(cluster_name="cluster1") + assert instance.to_query_string() == "clusterName=cluster1" + + +# 
============================================================================= +# Test: EpManageFabricsSwitchProvisionRMAPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00500(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.__init__() + - EpManageFabricsSwitchProvisionRMAPost.class_name + - EpManageFabricsSwitchProvisionRMAPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchProvisionRMAPost() + assert instance.class_name == "EpManageFabricsSwitchProvisionRMAPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switches_00510(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + instance = EpManageFabricsSwitchProvisionRMAPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00520(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost raises ValueError when switch_sn is not set + + ## Test + + - Accessing path raises ValueError when switch_sn is None + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + instance = EpManageFabricsSwitchProvisionRMAPost() + instance.fabric_name = "MyFabric" + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00530(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost path without query params + + ## Test + + - Path is correctly built with fabric_name and switch_sn + - No query string 
appended when ticket_id is not set + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchProvisionRMAPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = "SAL1948TRTT" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA" + + +def test_endpoints_api_v1_manage_fabrics_switches_00540(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost path with ticket_id + + ## Test + + - Path includes ticketId query parameter when set + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchProvisionRMAPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = "SAL1948TRTT" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345" + + +# ============================================================================= +# Test: EpManageFabricsSwitchChangeSerialNumberPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00600(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.__init__() + - EpManageFabricsSwitchChangeSerialNumberPost.class_name + - EpManageFabricsSwitchChangeSerialNumberPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchChangeSerialNumberPost() + assert instance.class_name == "EpManageFabricsSwitchChangeSerialNumberPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switches_00610(): + """ + # Summary + + Verify 
EpManageFabricsSwitchChangeSerialNumberPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + instance = EpManageFabricsSwitchChangeSerialNumberPost() + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00620(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost raises ValueError when switch_sn is not set + + ## Test + + - Accessing path raises ValueError when switch_sn is None + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + instance = EpManageFabricsSwitchChangeSerialNumberPost() + instance.fabric_name = "MyFabric" + with pytest.raises(ValueError): + _ = instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00630(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost path without query params + + ## Test + + - Path is correctly built with fabric_name and switch_sn + - No query string appended when cluster_name is not set + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchChangeSerialNumberPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = "SAL1948TRTT" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber" + + +def test_endpoints_api_v1_manage_fabrics_switches_00640(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost path with cluster_name + + ## Test + + - Path includes clusterName query parameter when set + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchChangeSerialNumberPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = 
"SAL1948TRTT" + instance.endpoint_params.cluster_name = "cluster1" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1" + From f9900f8270a2dc0925a74f5ebf4c8d71b8d73225 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 19 Mar 2026 16:04:28 +0530 Subject: [PATCH 021/109] Remove NDOutput Changes --- plugins/module_utils/nd_switch_resources.py | 141 +------------------- 1 file changed, 5 insertions(+), 136 deletions(-) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 5fdb2c47..38241cb9 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -23,7 +23,6 @@ from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType -from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches import ( @@ -87,42 +86,6 @@ _DISCOVERY_MAX_HOPS: int = 0 -# ========================================================================= -# Output Collections -# ========================================================================= - -class SwitchOutputCollection(NDConfigCollection): - """Output collection for all output keys (previous, current, proposed, diff). - - Accepts ``SwitchDataModel``, ``SwitchConfigModel``, or ``_DiffRecord`` items - and serializes them via ``to_config_dict()``. - """ - - def __init__(self, model_class=None, items: Optional[List] = None): - # Store directly — skip add() type guard to support mixed-type diffs. 
- self._model_class = model_class - self._items: List = list(items) if items else [] - self._index: Dict = {} - - def to_ansible_config(self, **kwargs) -> List[Dict]: - return [item.to_config_dict() for item in self._items] - - def copy(self) -> "SwitchOutputCollection": - return SwitchOutputCollection( - model_class=self._model_class, - items=deepcopy(list(self._items)), - ) - - -@dataclass -class _DiffRecord: - """Wraps a plain dict as a diff entry, exposing ``to_config_dict()``.""" - - data: Dict[str, Any] - - def to_config_dict(self) -> Dict[str, Any]: - return self.data - @dataclass class SwitchServiceContext: @@ -142,7 +105,6 @@ class SwitchServiceContext: log: logging.Logger save_config: bool = True deploy_config: bool = True - output: Optional[NDOutput] = None # ========================================================================= @@ -1309,20 +1271,6 @@ def handle( if preprov_models: self._preprovision_switches(preprov_models) - if self.ctx.output: - diff_items = [ - _DiffRecord({ - "serial_number": m.serial_number, - "hostname": m.hostname, - "ip": m.ip, - "model": m.model, - "software_version": m.software_version, - "role": m.switch_role, - }) - for m in preprov_models - ] - self.ctx.output.assign(diff=SwitchOutputCollection(items=diff_items)) - # Edge case: nothing actionable if not bootstrap_entries and not preprov_entries and not swap_entries: log.warning("No POAP switch models built — nothing to process") @@ -1418,21 +1366,6 @@ def _handle_poap_bootstrap( skip_greenfield_check=True, ) - if self.ctx.output: - import_by_serial = {m.serial_number: m for m in import_models} - diff_items = [ - _DiffRecord({ - "seed_ip": switch_cfg.seed_ip, - "serial_number": serial, - "hostname": import_by_serial[serial].hostname if serial in import_by_serial else None, - "model": import_by_serial[serial].model if serial in import_by_serial else None, - "software_version": import_by_serial[serial].version if serial in import_by_serial else None, - "role": 
switch_cfg.role, - }) - for serial, switch_cfg in switch_actions - ] - self.ctx.output.assign(diff=SwitchOutputCollection(items=diff_items)) - log.debug("EXIT: _handle_poap_bootstrap()") def _build_bootstrap_import_model( @@ -2074,19 +2007,6 @@ def handle( log.error(msg) nd.module.fail_json(msg=msg) - if self.ctx.output: - diff_items = [ - _DiffRecord({ - "seed_ip": switch_cfg.seed_ip, - "old_serial_number": old_serial, - "new_serial_number": new_serial, - "hostname": old_switch_info[old_serial]["hostname"], - "role": switch_cfg.role, - }) - for new_serial, old_serial, switch_cfg in rma_diff_data - ] - self.ctx.output.assign(diff=SwitchOutputCollection(items=diff_items)) - self.fabric_ops.bulk_save_credentials(switch_actions) try: @@ -2391,11 +2311,11 @@ def __init__( # Switch collections try: self.proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) - self.existing: SwitchOutputCollection = SwitchOutputCollection.from_api_response( + self.existing: NDConfigCollection = NDConfigCollection.from_api_response( response_data=self._query_all_switches(), model_class=SwitchDataModel, ) - self.previous: SwitchOutputCollection = self.existing.copy() + self.previous: NDConfigCollection = self.existing.copy() except Exception as e: msg = ( f"Failed to query fabric '{self.fabric}' inventory " @@ -2407,12 +2327,6 @@ def __init__( # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] - # Output tracking — NDOutput serializes all collections via their - # overridden to_ansible_config() methods. 
- self.output = NDOutput(output_level=self.module.params.get("output_level", "normal")) - self.output.assign(before=self.previous, after=self.existing) - self.ctx.output = self.output - # Utility instances (SwitchWaitUtils / FabricUtils depend on self) self.fabric_utils = FabricUtils(self.nd, self.fabric, log) self.wait_utils = SwitchWaitUtils( @@ -2442,27 +2356,12 @@ def exit_json(self) -> None: # Re-query the fabric to get the actual post-operation inventory so # that "current" reflects real state rather than the pre-op snapshot. if True not in self.results.failed and not self.nd.module.check_mode: - self.existing = SwitchOutputCollection.from_api_response( + self.existing = NDConfigCollection.from_api_response( response_data=self._query_all_switches(), model_class=SwitchDataModel ) - self.output.assign(after=self.existing) - - self.output._changed = bool(final.get("changed", False)) - formatted = self.output.format() - - output_level = formatted["output_level"] - # Rename before/after to previous/current for backward compatibility. - final["previous"] = formatted.pop("before", []) - final["current"] = formatted.pop("after", []) - final["output_level"] = output_level - final["diff"] = formatted.get("diff", []) - - if output_level in ("info", "debug"): - final["proposed"] = formatted.get("proposed", []) - if output_level == "debug": - # Override NDOutput's placeholder with real operation logs. 
- final["logs"] = self.nd_logs + final["previous"] = self.previous.to_ansible_config() + final["current"] = self.existing.to_ansible_config() if True in self.results.failed: self.nd.module.fail_json(**final) @@ -2491,12 +2390,6 @@ def manage_state(self) -> None: if self.config else None ) - if proposed_config: - self.output.assign( - proposed=SwitchOutputCollection( - model_class=SwitchConfigModel, items=proposed_config - ) - ) return self._handle_deleted_state(proposed_config) # merged / overridden — config is required @@ -2508,12 +2401,6 @@ def manage_state(self) -> None: proposed_config = SwitchDiffEngine.validate_configs( self.config, self.state, self.nd, self.log ) - # Register proposed config (credentials excluded via SwitchOutputCollection) - self.output.assign( - proposed=SwitchOutputCollection( - model_class=SwitchConfigModel, items=proposed_config - ) - ) # Partition configs by operation type poap_configs = [c for c in proposed_config if c.operation_type == "poap"] rma_configs = [c for c in proposed_config if c.operation_type == "rma"] @@ -2633,7 +2520,6 @@ def _handle_merged_state( # Collect (serial_number, SwitchConfigModel) pairs for post-processing switch_actions: List[Tuple[str, SwitchConfigModel]] = [] - diff_items: List = [] # Phase 4: Bulk add new switches to fabric if switches_to_add and discovered_data: @@ -2677,17 +2563,6 @@ def _handle_merged_state( sn = disc.get("serialNumber") if sn: switch_actions.append((sn, cfg)) - # Discovery response has softwareVersion, hostname, - # model — richer than SwitchConfigModel fields. 
- diff_items.append(_DiffRecord({ - "seed_ip": cfg.seed_ip, - "serial_number": sn, - "hostname": disc.get("hostname"), - "model": disc.get("model"), - "role": cfg.role, - "software_version": disc.get("softwareVersion"), - "mode": None, - })) self._log_operation("add", cfg.seed_ip) # Phase 5: Collect migration switches for post-processing @@ -2701,9 +2576,6 @@ def _handle_merged_state( cfg = config_by_ip.get(mig_sw.fabric_management_ip) if cfg and mig_sw.switch_id: switch_actions.append((mig_sw.switch_id, cfg)) - # mig_sw is a SwitchDataModel — has all 7 fields including - # software_version and mode from the inventory API. - diff_items.append(mig_sw) self._log_operation("migrate", mig_sw.fabric_management_ip) if not switch_actions: @@ -2731,8 +2603,6 @@ def _handle_merged_state( all_preserve_config=all_preserve_config, update_roles=have_migration_switches, ) - self.output.assign(diff=SwitchOutputCollection(items=diff_items)) - self.log.debug("EXIT: _handle_merged_state() - completed") # ----------------------------------------------------------------- @@ -2963,7 +2833,6 @@ def _handle_deleted_state( f"Proceeding to delete {len(switches_to_delete)} switch(es) from fabric" ) self.fabric_ops.bulk_delete(switches_to_delete) - self.output.assign(diff=SwitchOutputCollection(items=switches_to_delete)) self.log.debug("EXIT: _handle_deleted_state()") # ===================================================================== From 36488a82ad961852512b9609a86b9ffcd9996ed7 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 20 Mar 2026 12:16:19 +0530 Subject: [PATCH 022/109] Module Cleanup + Check Mode --- .../models/manage_switches/config_models.py | 15 +++ plugins/module_utils/nd_switch_resources.py | 48 ++++--- plugins/modules/nd_manage_switches.py | 123 ++---------------- 3 files changed, 55 insertions(+), 131 deletions(-) diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 
94336143..c8b97195 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -591,6 +591,21 @@ def to_payload(self) -> Dict[str, Any]: exclude_none=True, ) + @classmethod + def get_argument_spec(cls) -> Dict[str, Any]: + """Return the Ansible argument spec for nd_manage_switches.""" + return dict( + fabric=dict(type="str", required=True), + state=dict( + type="str", + default="merged", + choices=["merged", "overridden", "deleted"], + ), + save=dict(type="bool", default=True), + deploy=dict(type="bool", default=True), + config=dict(type="list", elements="dict"), + ) + __all__ = [ "ConfigDataModel", diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 38241cb9..b69c342d 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -1167,20 +1167,7 @@ def handle( log.debug("ENTER: POAPHandler.handle()") log.info(f"Processing POAP for {len(proposed_config)} switch config(s)") - # Check mode — preview only - if nd.module.check_mode: - log.info("Check mode: would run POAP bootstrap / pre-provision") - results.action = "poap" - results.operation_type = OperationType.CREATE - results.response_current = {"MESSAGE": "check mode — skipped"} - results.result_current = {"success": True, "changed": True} - results.diff_current = { - "poap_switches": [pc.seed_ip for pc in proposed_config] - } - results.register_api_call() - return - - # Classify entries + # Classify entries first so check mode can report per-operation counts bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] preprov_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] @@ -1211,6 +1198,24 @@ def handle( f"{len(swap_entries)} swap" ) + # Check mode — preview only + if nd.module.check_mode: + log.info( + f"Check mode: would bootstrap 
{len(bootstrap_entries)}, " + f"pre-provision {len(preprov_entries)}, swap {len(swap_entries)}" + ) + results.action = "poap" + results.operation_type = OperationType.CREATE + results.response_current = {"MESSAGE": "check mode — skipped"} + results.result_current = {"success": True, "changed": False} + results.diff_current = { + "bootstrap": [cfg.seed_ip for cfg, _ in bootstrap_entries], + "preprovision": [cfg.seed_ip for cfg, _ in preprov_entries], + "swap": [cfg.seed_ip for cfg, _ in swap_entries], + } + results.register_api_call() + return + # Idempotency: skip entries whose target serial is already in the fabric. # Build lookup structures for idempotency checks. # Bootstrap: idempotent when both IP address AND serial number match. @@ -1904,7 +1909,7 @@ def handle( results.action = "rma" results.operation_type = OperationType.CREATE results.response_current = {"MESSAGE": "check mode — skipped"} - results.result_current = {"success": True, "changed": True} + results.result_current = {"success": True, "changed": False} results.diff_current = { "rma_switches": [pc.seed_ip for pc in proposed_config] } @@ -2503,17 +2508,19 @@ def _handle_merged_state( # Check mode — preview only if self.nd.module.check_mode: self.log.info( - f"Check mode: would add {len(switches_to_add)} and " - f"process {len(migration_switches)} migration switches" + f"Check mode: would add {len(switches_to_add)}, " + f"process {len(migration_switches)} migration switch(es), " + f"save_deploy_required={idempotent_save_req}" ) self.results.action = "merge" self.results.state = self.state self.results.operation_type = OperationType.CREATE self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} - self.results.result_current = {"success": True, "changed": True} + self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_add": [sw.fabric_management_ip for sw in switches_to_add], "migration_mode": [sw.fabric_management_ip for sw in 
migration_switches], + "save_deploy_required": idempotent_save_req, } self.results.register_api_call() return @@ -2710,12 +2717,11 @@ def _handle_overridden_state( f"delete-and-re-add {n_update}, " f"add {n_add}, migrate {n_migrate}" ) - would_change = (n_delete + n_update + n_add + n_migrate) > 0 self.results.action = "override" self.results.state = self.state self.results.operation_type = OperationType.CREATE self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} - self.results.result_current = {"success": True, "changed": would_change} + self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_delete": n_delete, "to_update": n_update, @@ -2822,7 +2828,7 @@ def _handle_deleted_state( self.results.state = self.state self.results.operation_type = OperationType.DELETE self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} - self.results.result_current = {"success": True, "changed": True} + self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_delete": [sw.fabric_management_ip for sw in switches_to_delete], } diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 9e1ea604..ffd39f01 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -372,6 +372,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import SwitchConfigModel from ansible_collections.cisco.nd.plugins.module_utils.nd_switch_resources import NDSwitchResourceModule from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import ( NDModule, @@ -383,99 +384,10 @@ def main(): """Main entry point for the nd_manage_switches module.""" - + # Build argument spec argument_spec = nd_argument_spec() - 
argument_spec.update( - fabric=dict(type="str", required=True), - config=dict( - type="list", - elements="dict", - options=dict( - seed_ip=dict(type="str", required=True), - auth_proto=dict( - type="str", - default="MD5", - choices=["MD5", "SHA", "MD5_DES", "MD5_AES", "SHA_DES", "SHA_AES"] - ), - user_name=dict(type="str", default="admin"), - password=dict(type="str", no_log=True), - role=dict( - type="str", - default="leaf", - choices=[ - "leaf", "spine", "border", "border_spine", - "border_gateway", "border_gateway_spine", - "super_spine", "border_super_spine", - "border_gateway_super_spine", "access", - "aggregation", "edge_router", "core_router", "tor" - ] - ), - preserve_config=dict(type="bool", default=False), - poap=dict( - type="list", - elements="dict", - options=dict( - discovery_username=dict(type="str"), - discovery_password=dict(type="str", no_log=True), - serial_number=dict(type="str"), - preprovision_serial=dict(type="str"), - model=dict(type="str"), - version=dict(type="str"), - hostname=dict(type="str"), - image_policy=dict(type="str"), - config_data=dict( - type="dict", - options=dict( - models=dict( - type="list", - elements="str", - ), - gateway=dict( - type="str", - ), - ), - ), - ), - ), - rma=dict( - type="list", - elements="dict", - options=dict( - old_serial=dict(type="str", required=True), - serial_number=dict(type="str", required=True), - model=dict(type="str", required=True), - version=dict(type="str", required=True), - image_policy=dict(type="str"), - discovery_username=dict(type="str"), - discovery_password=dict(type="str", no_log=True), - config_data=dict( - type="dict", - required=True, - options=dict( - models=dict( - type="list", - elements="str", - required=True, - ), - gateway=dict( - type="str", - required=True, - ), - ), - ), - ), - ), - ), - ), - save=dict(type="bool", default=True), - deploy=dict(type="bool", default=True), - state=dict( - type="str", - default="merged", - choices=["merged", "overridden", "deleted"] - ), - ) 
+ argument_spec.update(SwitchConfigModel.get_argument_spec()) # Create Ansible module module = AnsibleModule( @@ -490,7 +402,6 @@ def main(): # Initialize logging try: log_config = Log() - log_config.config = "/Users/achengam/Documents/Ansible_Dev/NDBranch/ansible_collections/cisco/nd/ansible_cisco_log_r.json" log_config.commit() # Create logger instance for this module log = logging.getLogger("nd.nd_manage_switches") @@ -498,22 +409,16 @@ def main(): module.fail_json(msg=str(error)) # Get parameters - state = module.params.get("state") - fabric = module.params.get("fabric") output_level = module.params.get("output_level") # Initialize Results - this collects all operation results results = Results() - results.state = state results.check_mode = module.check_mode - results.action = f"manage_switches_{state}" + results.action = "manage_switches" try: - log.info(f"Starting nd_manage_switches module: fabric={fabric}, state={state}") - # Initialize NDModule (uses RestSend infrastructure internally) nd = NDModule(module) - log.info("NDModule initialized successfully") # Create NDSwitchResourceModule sw_module = NDSwitchResourceModule( @@ -521,12 +426,10 @@ def main(): results=results, logger=log ) - log.info(f"NDSwitchResourceModule initialized for fabric: {fabric}") - + # Manage state for merged, overridden, deleted - log.info(f"Managing state: {state}") sw_module.manage_state() - + # Exit with results log.info(f"State management completed successfully. Changed: {results.changed}") sw_module.exit_json() @@ -534,7 +437,7 @@ def main(): except NDModuleError as error: # NDModule-specific errors (API failures, authentication issues, etc.) 
log.error(f"NDModule error: {error.msg}") - + # Try to get response from RestSend if available try: results.response_current = nd.rest_send.response_current @@ -550,15 +453,15 @@ def main(): "success": False, "found": False, } - + results.diff_current = {} results.register_api_call() results.build_final_result() - + # Add error details if debug output is requested if output_level == "debug": results.final_result["error_details"] = error.to_dict() - + log.error(f"Module failed: {results.final_result}") module.fail_json(msg=error.msg, **results.final_result) @@ -566,7 +469,7 @@ def main(): # Unexpected errors log.error(f"Unexpected error during module execution: {str(error)}") log.error(f"Error type: {type(error).__name__}") - + # Build failed result results.response_current = { "RETURN_CODE": -1, @@ -580,11 +483,11 @@ def main(): results.diff_current = {} results.register_api_call() results.build_final_result() - + if output_level == "debug": import traceback results.final_result["traceback"] = traceback.format_exc() - + module.fail_json(msg=str(error), **results.final_result) From e5ae068e81d1a93e7eda6620ccc03032648188e7 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 20 Mar 2026 14:38:54 +0530 Subject: [PATCH 023/109] Add gathered state support to the module --- .../models/manage_switches/config_models.py | 62 ++++++++++++++++- plugins/module_utils/nd_switch_resources.py | 69 ++++++++++++++++--- plugins/modules/nd_manage_switches.py | 25 +++++-- 3 files changed, 142 insertions(+), 14 deletions(-) diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index c8b97195..b464966f 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -591,6 +591,66 @@ def to_payload(self) -> Dict[str, Any]: exclude_none=True, ) + @classmethod + def from_switch_data(cls, sw: Any) -> "SwitchConfigModel": + """Build a 
config-shaped entry from a live inventory record. + + Only the fields recoverable from the ND inventory API are populated. + Credentials (user_name, password) are intentionally omitted. + + Args: + sw: A SwitchDataModel instance from the fabric inventory. + + Returns: + SwitchConfigModel instance with seed_ip, role, and platform_type + populated from live data. + + Raises: + ValueError: If the inventory record is missing a management IP, + making it impossible to construct a valid config entry. + """ + if not sw.fabric_management_ip: + raise ValueError( + f"Switch {sw.switch_id!r} has no fabric_management_ip — " + "cannot build a gathered config entry without a seed IP." + ) + + platform_type = ( + sw.additional_data.platform_type + if sw.additional_data and hasattr(sw.additional_data, "platform_type") + else None + ) + + data: Dict[str, Any] = {"seed_ip": sw.fabric_management_ip} + if sw.switch_role is not None: + data["role"] = sw.switch_role + if platform_type is not None: + data["platform_type"] = platform_type + + return cls.model_validate(data) + + def to_gathered_dict(self) -> Dict[str, Any]: + """Return a config dict suitable for gathered output. + + platform_type is excluded (internal detail not needed by the user). + user_name and password are replaced with placeholders so the returned + data is immediately usable as ``config:`` input after substituting + real credentials. + + Returns: + Dict with seed_ip, role, auth_proto, preserve_config, + user_name set to ``""``, password set to ``""``. 
+ """ + result = self.to_config(exclude={ + "platform_type": True, + "poap": True, + "rma": True, + "operation_type": True, + }) + result["user_name"] = "" + result["password"] = "" + return result + @classmethod def get_argument_spec(cls) -> Dict[str, Any]: """Return the Ansible argument spec for nd_manage_switches.""" @@ -599,7 +659,7 @@ def get_argument_spec(cls) -> Dict[str, Any]: state=dict( type="str", default="merged", - choices=["merged", "overridden", "deleted"], + choices=["merged", "overridden", "deleted", "gathered"], ), save=dict(type="bool", default=True), deploy=dict(type="bool", default=True), diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index b69c342d..af768af1 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -2358,15 +2358,29 @@ def exit_json(self) -> None: self.results.build_final_result() final = self.results.final_result - # Re-query the fabric to get the actual post-operation inventory so - # that "current" reflects real state rather than the pre-op snapshot. - if True not in self.results.failed and not self.nd.module.check_mode: - self.existing = NDConfigCollection.from_api_response( - response_data=self._query_all_switches(), model_class=SwitchDataModel - ) - - final["previous"] = self.previous.to_ansible_config() - final["current"] = self.existing.to_ansible_config() + if self.state == "gathered": + # gathered: expose the already-queried inventory in config shape. + # No re-query needed — nothing was changed. 
+ gathered = [] + for sw in self.existing: + try: + gathered.append(SwitchConfigModel.from_switch_data(sw).to_gathered_dict()) + except (ValueError, Exception) as exc: + msg = ( + f"Failed to convert switch {sw.switch_id!r} to gathered format: {exc}" + ) + self.log.error(msg) + self.nd.module.fail_json(msg=msg) + final["gathered"] = gathered + else: + # Re-query the fabric to get the actual post-operation inventory so + # that "current" reflects real state rather than the pre-op snapshot. + if True not in self.results.failed and not self.nd.module.check_mode: + self.existing = NDConfigCollection.from_api_response( + response_data=self._query_all_switches(), model_class=SwitchDataModel + ) + final["previous"] = self.previous.to_ansible_config() + final["current"] = self.existing.to_ansible_config() if True in self.results.failed: self.nd.module.fail_json(**final) @@ -2388,6 +2402,14 @@ def manage_state(self) -> None: """ self.log.info(f"Managing state: {self.state}") + # gathered — read-only, no config accepted + if self.state == "gathered": + if self.config: + self.nd.module.fail_json( + msg="'config' must not be provided for 'gathered' state." + ) + return self._handle_gathered_state() + # deleted — config is optional if self.state == "deleted": proposed_config = ( @@ -2776,6 +2798,35 @@ def _handle_overridden_state( self._handle_merged_state(diff, proposed_config, discovered_data) self.log.debug("EXIT: _handle_overridden_state()") + def _handle_gathered_state(self) -> None: + """Handle gathered-state read of the fabric inventory. + + No API writes are performed. The existing inventory is serialised into + SwitchConfigModel shape by exit_json(). This method only records the + result metadata so that Results aggregation works correctly. + + Returns: + None. 
+ """ + self.log.debug("ENTER: _handle_gathered_state()") + self.log.info(f"Gathering inventory for fabric '{self.fabric}'") + + if not self.existing: + self.log.info(f"Fabric '{self.fabric}' has no switches in inventory") + + self.results.action = "gathered" + self.results.state = self.state + self.results.operation_type = OperationType.QUERY + self.results.response_current = {"MESSAGE": "gathered", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": False} + self.results.diff_current = {} + self.results.register_api_call() + + self.log.info( + f"Gathered {len(list(self.existing))} switch(es) from fabric '{self.fabric}'" + ) + self.log.debug("EXIT: _handle_gathered_state()") + def _handle_deleted_state( self, proposed_config: Optional[List[SwitchConfigModel]] = None, diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index ffd39f01..b6f01cb8 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -32,12 +32,15 @@ - The state of ND and switch(es) after module completion. - C(merged) is the only state supported for POAP. - C(merged) is the only state supported for RMA. + - C(gathered) reads the current fabric inventory and returns it in the + C(gathered) key in config format. No changes are made. type: str default: merged choices: - merged - overridden - deleted + - gathered save: description: - Save/Recalculate the configuration of the fabric after inventory is updated. @@ -343,27 +346,41 @@ - seed_ip: 192.168.10.202 state: deleted +- name: Gather all switches from fabric + cisco.nd.nd_manage_switches: + fabric: my-fabric + state: gathered + register: result + """ RETURN = """ previous: description: The configuration prior to the module execution. - returned: always + returned: when state is not gathered type: list elements: dict proposed: description: The proposed configuration sent to the API. 
- returned: always + returned: when state is not gathered type: list elements: dict sent: description: The configuration sent to the API. - returned: always + returned: when state is not gathered type: list elements: dict current: description: The current configuration after module execution. - returned: always + returned: when state is not gathered + type: list + elements: dict +gathered: + description: + - The current fabric switch inventory returned in config format. + - Each entry mirrors the C(config) input schema (seed_ip, role, + auth_proto, preserve_config). Credentials are replaced with placeholders. + returned: when state is gathered type: list elements: dict """ From 79394df447a455468bfbe7c37cc69e1f328e046c Mon Sep 17 00:00:00 2001 From: AKDRG Date: Tue, 24 Mar 2026 01:15:36 +0530 Subject: [PATCH 024/109] Integration Tests + Fixes --- plugins/action/nd_inventory_validate.py | 265 +++++++++++++++ .../models/manage_switches/config_models.py | 45 +-- .../models/manage_switches/enums.py | 11 +- plugins/module_utils/nd_switch_resources.py | 26 +- .../nd_manage_switches/defaults/main.yaml | 2 + .../targets/nd_manage_switches/meta/main.yaml | 1 + .../nd_manage_switches/tasks/base_tasks.yaml | 67 ++++ .../tasks/conf_prep_tasks.yaml | 11 + .../nd_manage_switches/tasks/main.yaml | 17 + .../nd_manage_switches/tasks/query_task.yaml | 33 ++ .../templates/nd_manage_switches_conf.j2 | 62 ++++ .../nd_manage_switches/tests/nd/deleted.yaml | 143 ++++++++ .../nd_manage_switches/tests/nd/gathered.yaml | 64 ++++ .../nd_manage_switches/tests/nd/merged.yaml | 318 ++++++++++++++++++ .../tests/nd/overridden.yaml | 166 +++++++++ .../nd_manage_switches/tests/nd/poap.yaml | 265 +++++++++++++++ .../nd_manage_switches/tests/nd/rma.yaml | 182 ++++++++++ .../nd_manage_switches/tests/nd/sanity.yaml | 184 ++++++++++ 18 files changed, 1828 insertions(+), 34 deletions(-) create mode 100644 plugins/action/nd_inventory_validate.py create mode 100644 
tests/integration/targets/nd_manage_switches/defaults/main.yaml create mode 100644 tests/integration/targets/nd_manage_switches/meta/main.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tasks/conf_prep_tasks.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tasks/main.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tasks/query_task.yaml create mode 100644 tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 create mode 100644 tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tests/nd/gathered.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml create mode 100644 tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml diff --git a/plugins/action/nd_inventory_validate.py b/plugins/action/nd_inventory_validate.py new file mode 100644 index 00000000..024ba634 --- /dev/null +++ b/plugins/action/nd_inventory_validate.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""ND Inventory Validation Action Plugin. + +Validates switch inventory data returned from nd_rest against expected +configuration entries. Checks that every entry in test_data has a matching +switch in the ND API response (fabricManagementIp == seed_ip, +switchRole == role). + +Supports an optional ``mode`` argument: + - ``"both"`` (default): match by seed_ip AND role. 
+ - ``"ip"``: match by seed_ip only (role is ignored). + - ``"role"``: match by role only (seed_ip is ignored). +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +import json +from typing import Any, Dict, List, Optional, Union + +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display + +try: + from pydantic import BaseModel, ValidationError, field_validator, model_validator + HAS_PYDANTIC = True +except ImportError: + HAS_PYDANTIC = False + +try: + from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import SwitchConfigModel + from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import SwitchDataModel + HAS_MODELS = True +except ImportError: + HAS_MODELS = False + +display = Display() + + +# --------------------------------------------------------------------------- +# Validation orchestration model +# --------------------------------------------------------------------------- + +class InventoryValidate(BaseModel): + """Orchestrates the match between playbook config entries and live ND inventory.""" + + config_data: Optional[List[Any]] = None + nd_data: Optional[List[Any]] = None + ignore_fields: Optional[Dict[str, int]] = None + response: Union[bool, None] = None + + @field_validator("config_data", mode="before") + @classmethod + def parse_config_data(cls, value): + """Coerce raw dicts into SwitchConfigModel instances. + + Accepts a single dict or a list of dicts. 
+ """ + if isinstance(value, dict): + return [SwitchConfigModel.model_validate(value)] + if isinstance(value, list): + try: + return [ + SwitchConfigModel.model_validate(item) if isinstance(item, dict) else item + for item in value + ] + except (ValidationError, ValueError) as e: + raise ValueError("Invalid format in Config Data: {0}".format(e)) + if value is None: + return None + raise ValueError("Config Data must be a single/list of dictionary, or None.") + + @field_validator("nd_data", mode="before") + @classmethod + def parse_nd_data(cls, value): + """Coerce raw ND API switch dicts into SwitchDataModel instances.""" + if isinstance(value, list): + try: + return [ + SwitchDataModel.from_response(item) if isinstance(item, dict) else item + for item in value + ] + except (ValidationError, ValueError) as e: + raise ValueError("Invalid format in ND Response: {0}".format(e)) + if value is None: + return None + raise ValueError("ND Response must be a list of dictionaries.") + + @model_validator(mode="after") + def validate_lists_equality(self): + """Match every config entry against the live ND switch inventory. + + Sets ``self.response = True`` when all entries match, ``False`` otherwise. + Respects ``ignore_fields`` to support ip-only or role-only matching modes. + + Role comparison uses SwitchRole enum equality — no string normalization needed. + """ + config_data = self.config_data + nd_data_list = self.nd_data + ignore_fields = self.ignore_fields + + # Both empty → nothing to validate, treat as success. + # Exactly one empty → mismatch, treat as failure. 
+ if not config_data and not nd_data_list: + self.response = True + return self + if not config_data or not nd_data_list: + self.response = False + return self + + missing_ips = [] + role_mismatches = {} + nd_data_copy = nd_data_list.copy() + matched_indices = set() + + for config_item in config_data: + found_match = False + seed_ip = config_item.seed_ip + role_expected = config_item.role # SwitchRole enum or None + + for i, nd_item in enumerate(nd_data_copy): + if i in matched_indices: + continue + + ip_address = nd_item.fabric_management_ip + switch_role = nd_item.switch_role # SwitchRole enum or None + + seed_ip_match = ( + (seed_ip is not None and ip_address is not None and ip_address == seed_ip) + or bool(ignore_fields["seed_ip"]) + ) + role_match = ( + (role_expected is not None and switch_role is not None and switch_role == role_expected) + or bool(ignore_fields["role"]) + ) + + if seed_ip_match and role_match: + matched_indices.add(i) + found_match = True + if ignore_fields["seed_ip"]: + break + elif ( + seed_ip_match + and role_expected is not None + and switch_role is not None + and switch_role != role_expected + ) or ignore_fields["role"]: + role_mismatches.setdefault( + seed_ip or ip_address, + { + "expected_role": role_expected.value if role_expected else None, + "response_role": switch_role.value if switch_role else None, + }, + ) + matched_indices.add(i) + found_match = True + if ignore_fields["seed_ip"]: + break + + if not found_match and seed_ip is not None: + missing_ips.append(seed_ip) + + if not missing_ips and not role_mismatches: + self.response = True + else: + display.display("Invalid Data:") + if missing_ips: + display.display(" Missing IPs: {0}".format(missing_ips)) + if role_mismatches: + display.display(" Role mismatches: {0}".format(json.dumps(role_mismatches, indent=2))) + self.response = False + + return self + + +# --------------------------------------------------------------------------- +# Action plugin +# 
--------------------------------------------------------------------------- + +class ActionModule(ActionBase): + """Ansible action plugin for validating ND switch inventory data. + + Arguments (task args): + nd_data (dict): The registered result of a cisco.nd.nd_rest GET call. + test_data (list|dict): Expected switch entries, each with ``seed_ip`` + and optionally ``role``. + changed (bool, optional): If provided and False, the task fails + immediately (used to assert an upstream + operation produced a change). + mode (str, optional): ``"both"`` (default), ``"ip"``, or ``"role"``. + """ + + def run(self, tmp=None, task_vars=None): + results = super(ActionModule, self).run(tmp, task_vars) + results["failed"] = False + + if not HAS_PYDANTIC or not HAS_MODELS: + results["failed"] = True + results["msg"] = "pydantic and the ND collection models are required for nd_inventory_validate" + return results + + nd_data = self._task.args["nd_data"] + test_data = self._task.args["test_data"] + + # Fail fast if the caller signals that no change occurred when one was expected. + if "changed" in self._task.args and not self._task.args["changed"]: + results["failed"] = True + results["msg"] = 'Changed is "false"' + return results + + # Fail fast if the upstream nd_rest task itself failed. + if nd_data.get("failed"): + results["failed"] = True + results["msg"] = nd_data.get("msg", "ND module returned a failure") + return results + + # Extract switch list from nd_data.current.switches + switches = nd_data.get("current", {}).get("switches", []) + + # Normalise test_data to a list. + if isinstance(test_data, dict): + test_data = [test_data] + + # If both are empty treat as success; if only nd response is empty it's a failure. + if not switches and not test_data: + results["msg"] = "Validation Successful!" 
+ return results + + if not switches: + results["failed"] = True + results["msg"] = "No switches found in ND response" + return results + + # Resolve matching mode via ignore_fields flags. + ignore_fields = {"seed_ip": 0, "role": 0} + if "mode" in self._task.args: + mode = self._task.args["mode"].lower() + if mode == "ip": + # IP mode: only match by seed_ip, ignore role + ignore_fields["role"] = 1 + elif mode == "role": + # Role mode: only match by role, ignore seed_ip + ignore_fields["seed_ip"] = 1 + + validation = InventoryValidate( + config_data=test_data, + nd_data=switches, + ignore_fields=ignore_fields, + response=None, + ) + + if validation.response: + results["msg"] = "Validation Successful!" + else: + results["failed"] = True + results["msg"] = "Validation Failed! Please check output above." + + return results + diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index b464966f..711bbb57 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -423,41 +423,24 @@ def to_config_dict(self) -> Dict[str, Any]: "rma": {"__all__": {"discovery_username": True, "discovery_password": True}}, }) - @model_validator(mode='before') - @classmethod - def reject_auth_proto_for_poap_rma(cls, data: Any) -> Any: + @model_validator(mode='after') + def reject_auth_proto_for_poap_rma(self) -> Self: """Reject non-MD5 auth_proto when POAP or RMA is configured. POAP, Pre-provision, and RMA operations always use MD5 internally. - If the user explicitly supplies a non-MD5 ``auth_proto`` (or - ``authProto``) alongside ``poap`` or ``rma``, raise an error so - they know the field is not user-configurable for these operation - types. - - Note: Ansible argspec injects the default ``"MD5"`` even when the - user omits ``auth_proto``, so we must allow MD5 through. 
+ By validating mode='after', all inputs (raw strings, enum instances, + or Ansible argspec-injected defaults) have already been coerced by + Pydantic into a typed SnmpV3AuthProtocol value, so a direct enum + comparison is safe and unambiguous. """ - if not isinstance(data, dict): - return data - - has_poap = bool(data.get("poap")) - has_rma = bool(data.get("rma")) - - if has_poap or has_rma: - # Check both snake_case (Ansible playbook) and camelCase (API) keys - auth_val = data.get("auth_proto") or data.get("authProto") - if auth_val is not None: - # Normalize to lowercase for comparison - normalized = str(auth_val).strip().lower() - if normalized not in ("md5", ""): - op = "POAP" if has_poap else "RMA" - raise ValueError( - f"'auth_proto' must not be specified for {op} operations. " - f"The authentication protocol is always MD5 and is set " - f"automatically. Received: '{auth_val}'" - ) - - return data + if (self.poap or self.rma) and self.auth_proto != SnmpV3AuthProtocol.MD5: + op = "POAP" if self.poap else "RMA" + raise ValueError( + f"'auth_proto' must not be specified for {op} operations. " + f"The authentication protocol is always MD5 and is set " + f"automatically. Received: '{self.auth_proto.value}'" + ) + return self @model_validator(mode='after') def validate_poap_rma_mutual_exclusion(self) -> Self: diff --git a/plugins/module_utils/models/manage_switches/enums.py b/plugins/module_utils/models/manage_switches/enums.py index 0d3f85cc..edb8f28a 100644 --- a/plugins/module_utils/models/manage_switches/enums.py +++ b/plugins/module_utils/models/manage_switches/enums.py @@ -317,12 +317,16 @@ def choices(cls) -> List[str]: class AnomalyLevel(str, Enum): """ Anomaly level classification. 
+ + Based on: components/schemas/anomalyLevel """ CRITICAL = "critical" MAJOR = "major" MINOR = "minor" WARNING = "warning" - INFO = "info" + HEALTHY = "healthy" + NOT_APPLICABLE = "notApplicable" + UNKNOWN = "unknown" @classmethod def choices(cls) -> List[str]: @@ -332,11 +336,16 @@ def choices(cls) -> List[str]: class AdvisoryLevel(str, Enum): """ Advisory level classification. + + Based on: components/schemas/advisoryLevel """ CRITICAL = "critical" MAJOR = "major" MINOR = "minor" + WARNING = "warning" + HEALTHY = "healthy" NONE = "none" + NOT_APPLICABLE = "notApplicable" @classmethod def choices(cls) -> List[str]: diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index af768af1..c5f4147b 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -299,6 +299,7 @@ def compute_changes( log.debug(f"Switch {ip} is idempotent — no changes needed") changes["idempotent"].append(prop_sw) else: + diff_keys = {k for k in set(prop_dict) | set(existing_dict) if prop_dict.get(k) != existing_dict.get(k)} log.info( f"Switch {ip} has differences — marking to_update. " f"Changed fields: {diff_keys}" @@ -671,7 +672,12 @@ def build_proposed( None, ) if existing_match: - proposed.append(existing_match) + if cfg.role is not None: + data = existing_match.model_dump(by_alias=True) + data["switchRole"] = cfg.role.value if isinstance(cfg.role, SwitchRole) else cfg.role + proposed.append(SwitchDataModel.model_validate(data)) + else: + proposed.append(existing_match) log.debug( f"Switch {seed_ip} already in fabric inventory — " f"using existing record (discovery skipped)" @@ -2794,7 +2800,23 @@ def _handle_overridden_state( diff["to_update"] = [] - # Phase 3: Delegate add + migration to merged state + # Phase 3: Re-discover switches that were just deleted (they were + # skipped during initial discovery because they were already in the + # fabric). 
+ update_ips = {sw.fabric_management_ip for sw in switches_to_delete} + configs_needing_rediscovery = [ + cfg for cfg in proposed_config if cfg.seed_ip in update_ips + ] + if configs_needing_rediscovery: + self.log.info( + f"Re-discovering {len(configs_needing_rediscovery)} switch(es) " + f"after deletion for re-add: " + f"{[cfg.seed_ip for cfg in configs_needing_rediscovery]}" + ) + fresh_discovered = self.discovery.discover(configs_needing_rediscovery) + discovered_data = {**(discovered_data or {}), **fresh_discovered} + + # Phase 4: Delegate add + migration to merged state self._handle_merged_state(diff, proposed_config, discovered_data) self.log.debug("EXIT: _handle_overridden_state()") diff --git a/tests/integration/targets/nd_manage_switches/defaults/main.yaml b/tests/integration/targets/nd_manage_switches/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/tests/integration/targets/nd_manage_switches/meta/main.yaml b/tests/integration/targets/nd_manage_switches/meta/main.yaml new file mode 100644 index 00000000..32cf5dda --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/meta/main.yaml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml b/tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml new file mode 100644 index 00000000..da143944 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml @@ -0,0 +1,67 @@ +--- +- name: Test Entry Point - [nd_manage_switches] + ansible.builtin.debug: + msg: + - "----------------------------------------------------------------" + - "+ Executing Base Tests - [nd_manage_switches] +" + - "----------------------------------------------------------------" + +# -------------------------------- +# Create Dictionary of Test Data +# -------------------------------- +- name: 
Base - Setup Internal TestCase Variables + ansible.builtin.set_fact: + test_data: + test_fabric: "{{ ansible_it_fabric }}" + sw1: "{{ ansible_switch1 }}" + sw2: "{{ ansible_switch2 }}" + sw3: "{{ ansible_switch3 }}" + deploy: "{{ deploy }}" + delegate_to: localhost + +# ---------------------------------------------- +# Create Module Payloads using Jinja2 Templates +# ---------------------------------------------- + +- name: Base - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{test_data.sw1}}" + auth_proto: MD5 + role: leaf + - seed_ip: "{{test_data.sw2}}" + auth_proto: MD5 + role: spine + - seed_ip: "{{test_data.sw3}}" + auth_proto: MD5 + role: border + delegate_to: localhost + + +- name: Import Configuration Prepare Tasks + vars: + file: base + ansible.builtin.import_tasks: ./conf_prep_tasks.yaml + +# ---------------------------------------------- +# Test Setup +# ---------------------------------------------- + +- name: Base - Verify fabric is reachable via API + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" + method: get + register: fabric_query + ignore_errors: true + +- name: Base - Assert fabric exists + ansible.builtin.assert: + that: + - fabric_query.status == 200 + fail_msg: "Fabric '{{ test_data.test_fabric }}' not found (HTTP {{ fabric_query.status }})." + success_msg: "Fabric '{{ test_data.test_fabric }}' found." 
+ +- name: Base - Clean Up Existing Devices in Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted \ No newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tasks/conf_prep_tasks.yaml b/tests/integration/targets/nd_manage_switches/tasks/conf_prep_tasks.yaml new file mode 100644 index 00000000..dce2fdec --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tasks/conf_prep_tasks.yaml @@ -0,0 +1,11 @@ +--- +- name: Build Fabric Base Config Data + ansible.builtin.template: + src: nd_manage_switches_conf.j2 + dest: "{{ role_path }}/files/nd_manage_switches_{{file}}_conf.yaml" + delegate_to: localhost + +- name: Access Fabric Configuration Data and Save to Local Variable + ansible.builtin.set_fact: + "{{ 'nd_switches_' + file +'_conf' }}": "{{ lookup('file', '{{ role_path }}/files/nd_manage_switches_{{file}}_conf.yaml') | from_yaml }}" + delegate_to: localhost diff --git a/tests/integration/targets/nd_manage_switches/tasks/main.yaml b/tests/integration/targets/nd_manage_switches/tasks/main.yaml new file mode 100644 index 00000000..834955ba --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tasks/main.yaml @@ -0,0 +1,17 @@ +--- +- name: Discover ND Test Cases + ansible.builtin.find: + paths: "{{ role_path }}/tests/nd" + patterns: "{{ testcase }}.yaml" + connection: local + register: nd_testcases + +- name: Build List of Test Items + ansible.builtin.set_fact: + test_items: "{{ nd_testcases.files | map(attribute='path') | list }}" + +- name: Run ND Test Cases + ansible.builtin.include_tasks: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/nd_manage_switches/tasks/query_task.yaml b/tests/integration/targets/nd_manage_switches/tasks/query_task.yaml new file mode 100644 index 00000000..7f851042 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tasks/query_task.yaml @@ -0,0 
+1,33 @@ +--- +- name: "Query Task: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: "{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: true + status_code: + - 200 + register: nd_auth_response + delegate_to: localhost + +- name: "Query Task: Query {{ test_data.test_fabric }} switch data from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: query_result + delegate_to: localhost diff --git a/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 new file mode 100644 index 00000000..fd4978fa --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 @@ -0,0 +1,62 @@ +--- +# This ND test data structure is auto-generated +# DO NOT EDIT MANUALLY +# + +# ------------------------------ +# Fabric Switches +# ------------------------------ + +{% if switch_conf is iterable %} +{% set switch_list = [] %} +{% for switch in switch_conf %} +{% set switch_item = {} %} +{% if switch.seed_ip is defined %} +{% set _ = switch_item.update({'seed_ip': switch.seed_ip | default('') }) %} +{% endif %} +{% set _ = switch_item.update({'user_name': switch_username}) %} +{% set _ = switch_item.update({'password': switch_password}) %} +{% if switch.role is defined %} +{% set _ = switch_item.update({'role': 
switch.role | default('') }) %} +{% endif %} +{% if switch.poap is defined %} +{% for sw_poap_item in switch.poap %} +{% set poap_item = {} %} +{% if sw_poap_item.preprovision_serial is defined and sw_poap_item.preprovision_serial %} +{% set _ = poap_item.update({'preprovision_serial': sw_poap_item.preprovision_serial}) %} +{% endif %} +{% if sw_poap_item.serial_number is defined and sw_poap_item.serial_number %} +{% set _ = poap_item.update({'serial_number': sw_poap_item.serial_number}) %} +{% endif %} +{% if sw_poap_item.model is defined and sw_poap_item.model %} +{% set _ = poap_item.update({'model': sw_poap_item.model}) %} +{% endif %} +{% if sw_poap_item.version is defined and sw_poap_item.version %} +{% set _ = poap_item.update({'version': sw_poap_item.version}) %} +{% endif %} +{% if sw_poap_item.hostname is defined and sw_poap_item.hostname %} +{% set _ = poap_item.update({'hostname': sw_poap_item.hostname}) %} +{% endif %} +{% if sw_poap_item.config_data is defined %} +{% set poap_config_item = {} %} +{% for sw_poap_config_item in sw_poap_item.config_data %} +{% set _ = poap_config_item.update({sw_poap_config_item: sw_poap_item.config_data[sw_poap_config_item]}) %} +{% endfor %} +{% set _ = poap_item.update({'config_data': poap_config_item}) %} +{% endif %} +{% set _ = switch_item.update({'poap': [poap_item]}) %} +{% endfor %} +{% else %} +{% if switch.auth_proto is defined %} +{% set _ = switch_item.update({'auth_proto': switch.auth_proto | default('') }) %} +{% endif %} +{% if switch.preserve_config is defined %} +{% set _ = switch_item.update({'preserve_config': switch.preserve_config | default('') }) %} +{% else %} +{% set _ = switch_item.update({'preserve_config': false }) %} +{% endif %} +{% endif %} +{% set _ = switch_list.append(switch_item) %} +{% endfor %} +{{ switch_list | to_nice_yaml(indent=2) }} +{% endif %} \ No newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml 
b/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml new file mode 100644 index 00000000..97202466 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml @@ -0,0 +1,143 @@ +--- + +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: deleted + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# TC - 1 +- name: Deleted TC1 - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + tags: deleted + +- name: Deleted TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: deleted + +- name: Debug - Print Query Result + ansible.builtin.debug: + var: query_result + tags: deleted + +- name: Deleted TC1 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{merged_result.changed}}" + register: result + tags: deleted + +# TC - 2 +- name: Deleted TC2 - Delete a Switch from the Fabric + cisco.nd.nd_manage_switches: &conf_del + fabric: "{{ test_data.test_fabric }}" + state: deleted + config: + - seed_ip: "{{ test_data.sw1 }}" + register: delete_result + tags: deleted + +- name: Deleted TC2 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_delete_conf: "{{ nd_switches_base_conf | rejectattr('seed_ip', 'equalto', test_data.sw1) | list }}" + delegate_to: localhost + tags: deleted + +- name: Deleted TC2 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: deleted + +- name: Debug - Print Query Result + 
ansible.builtin.debug: + var: query_result + tags: deleted + +- name: Deleted TC2 - Validate nd Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + changed: "{{ delete_result.changed }}" + register: result + tags: deleted + +# TC - 3 +- name: Deleted TC3 - Removing a previously Deleted Switch - Idempotence + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + config: + - seed_ip: "{{ test_data.sw1 }}" + register: delete_result + register: result + tags: deleted + +- name: Debug - Print Query Result + ansible.builtin.debug: + var: result + tags: deleted + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + # - 'result.response == "The switch provided is not part of the fabric and cannot be deleted"' + tags: deleted + +# TC - 4 +- name: Deleted TC4 - Delete all Switches from Fabric + cisco.nd.nd_manage_switches: &conf_del_all + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: delete_result + tags: deleted + +- name: Deleted TC4 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_delete_conf: [] + delegate_to: localhost + tags: deleted + +- name: Deleted TC4 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: deleted + +- name: Debug - Print Query Result + ansible.builtin.debug: + var: query_result + tags: deleted + +- name: Deleted TC4 - Validate nd Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + changed: "{{ delete_result.changed }}" + register: result + tags: deleted + +# TC - 5 +- name: Deleted TC5 - Delete all Switches from Fabric - Idempotence + cisco.nd.nd_manage_switches: *conf_del_all + register: result + tags: deleted + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + tags: deleted \ No 
newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/gathered.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/gathered.yaml new file mode 100644 index 00000000..6fb378d9 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/nd/gathered.yaml @@ -0,0 +1,64 @@ +--- +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: query + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# TC - 1 +- name: Query TC1 - Merge a Switch using GreenField Deployment + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + deploy: "{{ test_data.deploy }}" + register: create_result + tags: query + +- name: Query TC1 - Gather Switch State in Fabric + cisco.nd.nd_manage_switches: + state: gathered + fabric: "{{ test_data.test_fabric }}" + register: query_result + tags: query + +- name: Query TC1 - Build Gathered Lookup + ansible.builtin.set_fact: + gathered_seeds: "{{ query_result.gathered | map(attribute='seed_ip') | list }}" + gathered_role_map: "{{ query_result.gathered | items2dict(key_name='seed_ip', value_name='role') }}" + delegate_to: localhost + tags: query + +- name: Query TC1 - Validate Gathered Count + ansible.builtin.assert: + that: + - query_result.gathered | length == nd_switches_base_conf | length + fail_msg: >- + Gathered count {{ query_result.gathered | length }} does not match + expected {{ nd_switches_base_conf | length }} + tags: query + +- name: Query TC1 - Validate Each Switch Present and Role Matches + ansible.builtin.assert: + that: + - item.seed_ip in gathered_seeds + - "'role' not in item or gathered_role_map[item.seed_ip] == item.role" + fail_msg: >- + Switch {{ item.seed_ip }} missing from gathered output or role mismatch + (expected={{ item.role | default('any') }}, + got={{ 
gathered_role_map[item.seed_ip] | default('not found') }}) + loop: "{{ nd_switches_base_conf }}" + tags: query + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Query - Cleanup Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: query diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml new file mode 100644 index 00000000..2b8dc056 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml @@ -0,0 +1,318 @@ +--- + +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: merged + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# TC - 1 +- name: Merged TC1 - Merge a Switch using GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + tags: merged + +- name: Merged TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC1 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: merged + +# TC - 2 +- name: Merged TC2 - Idempotence + cisco.nd.nd_manage_switches: *conf + register: result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + tags: merged + +# TC - 3 +- name: Merged TC3 
- Clean up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: delete_result + tags: merged + +- name: Merged TC3 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_delete_conf: [] + delegate_to: localhost + tags: merged + +- name: Merged TC3 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC3 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + changed: "{{ delete_result.changed }}" + register: result + tags: merged + +# TC - 4 +- name: Merged TC4 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + role: leaf + auth_proto: MD5 + preserve_config: true + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC4 - Merge a Switch using BrownField Deployment + cisco.nd.nd_manage_switches: &conf_bf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + tags: merged + +- name: Merged TC4 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC4 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_merge_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: merged + +# TC - 5 +- name: Merged TC5 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_bf + register: result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == 
false' + # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + tags: merged + +# TC - 6 +- name: Merged TC6 - Clean up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: merged + +- name: Merged TC6 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC6 - Merge a Switch using GreenField Deployment - Using default role/auth_proto + cisco.nd.nd_manage_switches: &conf_def + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + tags: merged + +- name: Merged TC6 - Prepare Config + ansible.builtin.set_fact: + nd_switches_mergev_conf: + - seed_ip: "{{ test_data.sw1 }}" + role: leaf # default role in ND + delegate_to: localhost + tags: merged + +- name: Merged TC6 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC6 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_mergev_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: merged + +# TC - 7 +- name: Merged TC7 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_def + register: result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + tags: merged + +# TC - 8 +- name: Merged TC8 - Clean up Existing Switches + cisco.nd.nd_manage_switches: + fabric: 
"{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: merged + +# TC - 9 +- name: Merged TC9 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: + role: leaf + auth_proto: MD5 + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC9 - Merge a Switch without seed_ip + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + deploy: "{{ test_data.deploy }}" + ignore_errors: true + register: merged_result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"seed_ip cannot be empty" in merged_result.msg' + tags: merged + +# TC - 10 +- name: Merged TC10 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + auth_proto: MD5 + role: invalid + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC10 - Merge a Switch with Invalid Role + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + ignore_errors: true + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"Invalid SwitchRole: invalid" in merged_result.msg' + tags: merged + +# TC - 11 +- name: Merged TC11 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + auth_proto: MD55DM + role: leaf + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + 
+- name: Merged TC11 - Merge a Switch with invalid auth choice + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + ignore_errors: true + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"Invalid SnmpV3AuthProtocol: MD55DM" in merged_result.msg' + tags: merged + +# TC - 12 +- name: Merged TC12 - Merge a Switch without a config + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + register: merged_result + ignore_errors: true + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"state is merged but all of the following are missing: config" in merged_result.msg' + tags: merged \ No newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml new file mode 100644 index 00000000..75390bec --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml @@ -0,0 +1,166 @@ +--- +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: overridden + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# TC - 1 +- name: Overridden TC1 - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + tags: overridden + +- name: Overridden TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: overridden + +- name: Overridden TC1 - Validate 
ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: overridden + +# TC - 2 +- name: Overridden TC2 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf + register: result + tags: overridden + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + tags: overridden + +# TC - 3 +- name: Overridden TC3 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: spine + preserve_config: false + delegate_to: localhost + tags: overridden + +- name: Import Configuration Prepare Tasks + vars: + file: overridden + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: overridden + +- name: Overridden TC3 - Override Existing Switch - Removes Other Switches from Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: overridden + config: "{{ nd_switches_overridden_conf }}" + deploy: "{{ test_data.deploy }}" + register: overridden_result + tags: overridden + +- name: Overridden TC3 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: overridden + +- name: Overridden TC3 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_overridden_conf }}" + changed: "{{ overridden_result.changed }}" + register: result + tags: overridden + +# TC - 4 +- name: Overridden TC4 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: leaf + preserve_config: false + delegate_to: localhost + tags: overridden + +- name: Import Configuration Prepare Tasks + vars: + file: overridden + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: overridden + +- name: Overridden TC4 - New Role for the Existing Switch + 
cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: overridden + config: "{{ nd_switches_overridden_conf }}" + deploy: "{{ test_data.deploy }}" + register: overridden_result + tags: overridden + +- name: Overridden TC4 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: overridden + +- name: Overridden TC4 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_overridden_conf }}" + changed: "{{ overridden_result.changed }}" + register: result + tags: overridden + +# TC - 5 +- name: Overridden TC5 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + preserve_config: false + delegate_to: localhost + tags: overridden + +- name: Import Configuration Prepare Tasks + vars: + file: overridden + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: overridden + +- name: Overridden TC5 - Unspecified Role for the Existing Switch (Default, Leaf) + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: overridden + config: "{{ nd_switches_overridden_conf }}" + deploy: "{{ test_data.deploy }}" + register: overridden_result + tags: overridden + +- name: Assert + ansible.builtin.assert: + that: + - 'overridden_result.changed == false' + tags: overridden + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Overridden - Cleanup Fabric Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: overridden diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml new file mode 100644 index 00000000..c098c7ca --- /dev/null +++ 
b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml @@ -0,0 +1,265 @@ +--- +- name: Test Entry Point - [nd_manage_switches - Poap] + ansible.builtin.debug: + msg: + - "----------------------------------------------------------------" + - "+ Executing Poap Tests - [nd_manage_switches] +" + - "----------------------------------------------------------------" + tags: poap + +- name: Poap - Setup Internal TestCase Variables + ansible.builtin.set_fact: + test_data: + test_fabric: "{{ ansible_it_fabric }}" + sw1: "{{ ansible_switch1 }}" + sw1_serial: "1ABC23DEFGH" + sw2: "{{ ansible_switch2 }}" + sw2_serial: "1ABC23DEFHI" + poap_model: "ABC-D1230a" + poap_version: "1.2(3)" + prepro_hostname: "PreProv-SW" + poap_hostname: "Poap-SW" + poap_configmodel: "['ABC-D1230a']" + poap_gateway: "192.168.2.1/24" + sw3: "{{ ansible_switch3 }}" + deploy: "{{ deploy }}" + poap_enabled: false + delegate_to: localhost + tags: poap + +# Below commented tasks are sample tasks to enable Bootstrap and DHCP along with DHCP configs +# Please make sure you provide correct values for required fields +# Fabric config has many ND/DCNM auto generated values, so always GET the configs first +# and then set the required values. +# +# +# - name: Poap Merged - Get the configs of the fabric deployed. 
+# cisco.nd.nd_rest: +# path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" +# method: get +# register: result + +# - set_fact: +# result.jsondata.management.day0Bootstrap = true +# result.jsondata.management.localDhcpServer = true +# result.jsondata.management.dhcpProtocolVersion = "dhcpv4" +# result.jsondata.management.dhcpStartAddress = "192.168.1.10" +# result.jsondata.management.dhcpEndAddress = "192.168.1.20" +# result.jsondata.management.managementGateway = "192.168.1.1" +# result.jsondata.management.managementIpv4Prefix = "24" +# +# - name: Poap Merged - Configure Bootstrap and DHCP on Fabric +# cisco.nd.nd_rest: +# method: PUT +# path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" +# content: "{{ result.jsondata }}" +# + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# Base Tests +- name: Base - Verify fabric is reachable via API + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" + method: get + register: fabric_query + ignore_errors: true + tags: poap + +- name: Base - Assert fabric exists + ansible.builtin.assert: + that: + - fabric_query.status == 200 + fail_msg: "Fabric '{{ test_data.test_fabric }}' not found (HTTP {{ fabric_query.status }})." + success_msg: "Fabric '{{ test_data.test_fabric }}' found." 
+ tags: poap + +- name: POAP Base Task - Set Variable + ansible.builtin.set_fact: + poap_enabled: true + when: fabric_query.status == 200 and fabric_query.jsondata.management.day0Bootstrap + tags: poap + +# TC1 +- name: POAP TC1 - Prepare Validate Config + ansible.builtin.set_fact: + nd_switches_delete_conf: + delegate_to: localhost + tags: poap + +- name: POAP TC1 - Clean Up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: poap + +- name: POAP TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: poap + +- name: POAP TC1 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + register: result + tags: poap + +# ---------------------------------------------- # +# Merged # +# ---------------------------------------------- # + +# TC - 1 +- name: Poap TC1 - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + user_name: '{{ switch_username }}' + password: '{{ switch_password }}' + role: border + poap: + - preprovision_serial: "{{ test_data.sw2_serial }}" + model: "{{ test_data.poap_model }}" + version: "{{ test_data.poap_version }}" + hostname: "{{ test_data.prepro_hostname }}" + config_data: + models: "{{ test_data.poap_configmodel }}" + gateway: "{{ test_data.poap_gateway }}" + when: poap_enabled == True + delegate_to: localhost + tags: poap + +- name: Import Configuration Prepare Tasks + vars: + file: poap + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + when: poap_enabled == True + tags: poap + +- name: Poap TC1 - Merged - Pre-provisioned Switch Configuration + cisco.nd.nd_manage_switches: &conf_prepro + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_poap_conf }}" + deploy: "{{ 
test_data.deploy }}" + when: poap_enabled == True + register: merged_result + tags: poap + +- name: Poap TC1 - Merged - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + when: poap_enabled == True + register: query_result + tags: poap + +- name: Poap TC1 - Merged - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_poap_conf }}" + changed: "{{ merged_result.changed }}" + when: poap_enabled == True + register: result + tags: poap + +# TC - 2 +- name: Poap TC2 - Merged - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_prepro + when: poap_enabled == True + register: merged_result + tags: poap + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + # - 'merged_result.response == "The switch provided is already part of the fabric and cannot be created again"' + when: poap_enabled == True + tags: poap + +# TC - 3 +- name: Poap TC3 - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + role: leaf + poap: + - serial_number: "{{ test_data.sw1_serial }}" + model: "{{ test_data.poap_model }}" + version: "{{ test_data.poap_version }}" + hostname: "{{ test_data.poap_hostname }}" + config_data: + models: "{{ test_data.poap_configmodel }}" + gateway: "{{ test_data.poap_gateway }}" + - seed_ip: "{{ test_data.sw3 }}" + auth_proto: MD5 + role: spine + when: poap_enabled == True + delegate_to: localhost + tags: poap + +- name: Import Configuration Prepare Tasks + vars: + file: poap + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + when: poap_enabled == True + tags: poap + +- name: Poap TC3 - Merge Config + cisco.nd.nd_manage_switches: &conf_poap + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_poap_conf }}" + deploy: "{{ test_data.deploy }}" + when: poap_enabled == True + register: merged_result + 
tags: poap + +- name: Poap TC3 - Merged - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + when: poap_enabled == True + register: query_result + tags: poap + +- name: Poap TC3 - Merged - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_poap_conf }}" + changed: "{{ merged_result.changed }}" + when: poap_enabled == True + register: result + tags: poap + +# TC - 4 +- name: Poap TC4 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_poap + when: poap_enabled == True + register: result + tags: poap + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + when: poap_enabled == True + tags: poap + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Poap - Clean Up Existing Devices + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + when: poap_enabled == True + register: deleted_result + tags: poap diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml new file mode 100644 index 00000000..d214ac33 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml @@ -0,0 +1,182 @@ +--- +- name: Test Entry Point - [nd_manage_switches - RMA] + ansible.builtin.debug: + msg: + - "----------------------------------------------------------------" + - "+ Executing RMA Tests - [nd_manage_switches] +" + - "----------------------------------------------------------------" + tags: rma + +- name: RMA - Setup Internal TestCase Variables + ansible.builtin.set_fact: + test_data: + test_fabric: "{{ ansible_it_fabric }}" + sw1: "{{ ansible_switch1 }}" + sw1_serial: "1ABC23DEFGH" + 
sw1_rma_serial: "1ABC23DERMA" + rma_model: "SW1-K1234v" + rma_version: "12.3(4)" + rma_hostname: "RMA-SW" + rma_configmodel: "['SW1-K1234v']" + rma_gateway: "192.168.2.1/24" + deploy: "{{ deploy }}" + rma_enabled: false + delegate_to: localhost + tags: rma + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# Base Tests +- name: Base - Verify fabric is reachable via API + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" + method: get + register: fabric_query + ignore_errors: true + tags: rma + +- name: Base - Assert fabric exists + ansible.builtin.assert: + that: + - fabric_query.status == 200 + fail_msg: "Fabric '{{ test_data.test_fabric }}' not found (HTTP {{ fabric_query.status }})." + success_msg: "Fabric '{{ test_data.test_fabric }}' found." + tags: rma + +- name: RMA Base Task - Set Variable + ansible.builtin.set_fact: + rma_enabled: true + when: fabric_query.status == 200 and fabric_query.jsondata.management.day0Bootstrap + tags: rma + +# TC1 +- name: RMA TC1 - Prepare Validate Config + ansible.builtin.set_fact: + nd_switches_delete_conf: + delegate_to: localhost + tags: rma + +- name: RMA TC1 - Clean Up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: rma + +- name: RMA TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: rma + +- name: RMA TC1 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + register: result + tags: rma + +# Tasks to add a switch to fabric and to configure and deploy +# the switch in maintenance mode.
+# Please note that the switch should be shutdown after configuring it +# in maintenance mode + +# TC2 +- name: RMA TC2 - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + auth_proto: MD5 + when: rma_enabled == True + delegate_to: localhost + tags: rma + +- name: Import Configuration Prepare Tasks + vars: + file: rma + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + when: rma_enabled == True + tags: rma + +- name: RMA TC2 - Add Switch to the Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_rma_conf }}" + deploy: "{{ test_data.deploy }}" + when: rma_enabled == True + register: merged_result + tags: rma + +- name: RMA TC2 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + when: rma_enabled == True + register: query_result + tags: rma + +- name: RMA TC2 - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_rma_conf }}" + when: rma_enabled == True + register: result + tags: rma + +- name: RMA TC2 - Change System Mode to Maintenance, Deploy and Block until Complete + cisco.nd.nd_rest: + path: "/api/v1/manage/inventory/switchActions/changeSystemMode?deploy=true&blocking=true" + method: POST + content: + mode: "maintenance" + switchIds: + - "{{ test_data.sw1_serial }}" + register: change_system_mode_result + when: (rma_enabled == True) + tags: rma + +# TC3 +- block: + - name: RMA TC3 - RMA the Existing Switch + cisco.nd.nd_manage_switches: + fabric: '{{ test_data.test_fabric }}' + state: merged + config: + - seed_ip: '{{ test_data.sw1 }}' + user_name: '{{ switch_username }}' + password: '{{ switch_password }}' + rma: + - serial_number: '{{ test_data.sw1_rma_serial }}' + old_serial: '{{ test_data.sw1_serial }}' + model: '{{ test_data.rma_model }}' + version: '{{ test_data.rma_version }}' + 
hostname: '{{ test_data.rma_hostname }}' + config_data: + models: '{{ test_data.rma_configmodel }}' + gateway: '{{ test_data.rma_gateway }}' + register: result + + - name: ASSERT - Check condition + ansible.builtin.assert: + that: + - 'result.changed == true' + + - name: ASSERT - Check condition + ansible.builtin.assert: + that: + - 'item["RETURN_CODE"] == 200' + loop: '{{ result.response }}' + when: (rma_enabled == True) + tags: rma + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: RMA - Clean Up - Remove Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: rma diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml new file mode 100644 index 00000000..f66b59ed --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml @@ -0,0 +1,184 @@ +--- + +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: sanity + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# ---------------------------------------------- # +# Merged # +# ---------------------------------------------- # + +# TC - 1 +- name: Sanity TC1 - Merged - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + deploy: "{{ test_data.deploy }}" + register: create_result + tags: sanity + +- name: Sanity TC1 - Merged - Query Inventory State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC1 - Merged - Validate ND Data + cisco.nd.nd_inventory_validate: + 
nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{ create_result.changed }}" + register: result + tags: sanity + +# TC - 2 +- name: Sanity TC2 - Merged - Idempotence + cisco.nd.nd_manage_switches: *conf + register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + tags: sanity + +# ---------------------------------------------- # +# Query # +# ---------------------------------------------- # + +# # TC - 3 +# - name: Sanity TC3 - Query - Prepare Conf +# ansible.builtin.set_fact: +# nd_switches_sanity_conf: +# - seed_ip: "{{ test_data.sw1 }}" +# role: leaf +# delegate_to: localhost +# tags: sanity + +# - name: Sanity TC3 - Query - Query a Switch - Hostname and Role must match +# cisco.nd.nd_manage_switches: +# fabric: "{{ test_data.test_fabric }}" +# state: query +# config: "{{ nd_switches_sanity_conf }}" +# register: query_result +# tags: sanity + +# - name: Sanity TC3 - Query - Validate ND Data +# cisco.nd.nd_inventory_validate: +# nd_data: "{{ query_result }}" +# test_data: "{{ nd_switches_sanity_conf }}" +# changed: "{{ create_result.changed }}" +# register: result +# tags: sanity + +# ---------------------------------------------- # +# Overridden # +# ---------------------------------------------- # + +# TC - 4 +- name: Sanity TC4 - Overridden - Prepare Conf + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: leaf + preserve_config: false + delegate_to: localhost + tags: sanity + +- name: Import Configuration Prepare Tasks + vars: + file: sanity + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: sanity + +- name: Sanity TC4 - Overridden - Update a New Switch using GreenField Deployment - Delete and Create - default role + cisco.nd.nd_manage_switches: &conf_over + fabric: "{{ test_data.test_fabric }}" + state: 
overridden + config: "{{ nd_switches_sanity_conf }}" + deploy: "{{ test_data.deploy }}" + register: result + tags: sanity + +- name: Sanity TC4 - Overridden - Query Inventory State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC4 - Overridden - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_sanity_conf }}" + changed: "{{ create_result.changed }}" + register: result + tags: sanity + +# TC - 5 +- name: Sanity TC5 - Overridden - Idempotence + cisco.nd.nd_manage_switches: *conf_over + register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + # - 'result.response == "The switch provided is already part of the fabric and there is no more device to delete in the fabric"' + tags: sanity + +# ---------------------------------------------- # +# Clean-up # +# ---------------------------------------------- # + +# TC - 6 +- name: Sanity TC6 - Deleted - Clean up Existing devices + cisco.nd.nd_manage_switches: &clean + fabric: "{{ test_data.test_fabric }}" + state: deleted + config: "{{ nd_switches_sanity_conf }}" + register: deleted_result + tags: sanity + +- name: Sanity TC6 - Reset - Prepare Conf + ansible.builtin.set_fact: + nd_switches_sanity_conf: + delegate_to: localhost + tags: sanity + +- name: Sanity TC6 - Deleted - Query Inventory State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC6 - Deleted - Validate ND Data + cisco.nd.nd_inventory_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_sanity_conf }}" + changed: "{{ deleted_result.changed }}" + register: result + tags: sanity + +# TC - 7 +- name: Sanity TC7 - Deleted - Idempotence + cisco.nd.nd_manage_switches: *clean + 
register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + # - 'result.response == "The switch provided is not part of the fabric and cannot be deleted"' + tags: sanity \ No newline at end of file From 1c3b3c6a8274147e770671dd1d0260fd416bab7a Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 19 Aug 2025 12:44:17 -0400 Subject: [PATCH 025/109] [minor_change] Add nd_local_user as a new network resource module for Nexus Dashboard v4.1.0 and higher. --- plugins/module_utils/constants.py | 14 + plugins/module_utils/nd.py | 79 ++--- plugins/module_utils/nd_config_collection.py | 295 ++++++++++++++++++ plugins/module_utils/nd_network_resources.py | 202 ++++++++++++ plugins/module_utils/utils.py | 32 ++ plugins/modules/nd_local_user.py | 269 ++++++++++++++++ .../targets/nd_local_user/tasks/main.yml | 134 ++++++++ 7 files changed, 974 insertions(+), 51 deletions(-) create mode 100644 plugins/module_utils/nd_config_collection.py create mode 100644 plugins/module_utils/nd_network_resources.py create mode 100644 plugins/module_utils/utils.py create mode 100644 plugins/modules/nd_local_user.py create mode 100644 tests/integration/targets/nd_local_user/tasks/main.yml diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index 10de9edf..cbba61b3 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -157,6 +157,11 @@ "restart", "delete", "update", + "merged", + "replaced", + "overridden", + "deleted", + "gathered", ) INTERFACE_FLOW_RULES_TYPES_MAPPING = {"port_channel": "PORTCHANNEL", "physical": "PHYSICAL", "l3out_sub_interface": "L3_SUBIF", "l3out_svi": "SVI"} @@ -170,3 +175,12 @@ ND_SETUP_NODE_DEPLOYMENT_TYPE = {"physical": "cimc", "virtual": "vnode"} BACKUP_TYPE = {"config_only": "config-only", None: "config-only", "": "config-only", "full": "full"} + +USER_ROLES_MAPPING = { + "fabric_admin": "fabric-admin", + "observer": "observer", + "super_admin": 
"super-admin", + "support_engineer": "support-engineer", + "approver": "approver", + "designer": "designer", +} diff --git a/plugins/module_utils/nd.py b/plugins/module_utils/nd.py index 03ffc85f..5f528bb8 100644 --- a/plugins/module_utils/nd.py +++ b/plugins/module_utils/nd.py @@ -18,7 +18,6 @@ from ansible.module_utils.basic import json from ansible.module_utils.basic import env_fallback from ansible.module_utils.six import PY3 -from ansible.module_utils.six.moves import filterfalse from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils._text import to_native, to_text from ansible.module_utils.connection import Connection @@ -73,53 +72,27 @@ def cmp(a, b): def issubset(subset, superset): - """Recurse through nested dictionary and compare entries""" + """Recurse through a nested dictionary and check if it is a subset of another.""" - # Both objects are the same object - if subset is superset: - return True - - # Both objects are identical - if subset == superset: - return True - - # Both objects have a different type - if isinstance(subset) is not isinstance(superset): + if type(subset) is not type(superset): return False + if not isinstance(subset, dict): + if isinstance(subset, list): + return all(item in superset for item in subset) + return subset == superset + for key, value in subset.items(): - # Ignore empty values if value is None: - return True + continue - # Item from subset is missing from superset if key not in superset: return False - # Item has different types in subset and superset - if isinstance(superset.get(key)) is not isinstance(value): - return False + superset_value = superset.get(key) - # Compare if item values are subset - if isinstance(value, dict): - if not issubset(superset.get(key), value): - return False - elif isinstance(value, list): - try: - # NOTE: Fails for lists of dicts - if not set(value) <= set(superset.get(key)): - return False - except TypeError: - # Fall back to exact comparison for 
lists of dicts - diff = list(filterfalse(lambda i: i in value, superset.get(key))) + list(filterfalse(lambda j: j in superset.get(key), value)) - if diff: - return False - elif isinstance(value, set): - if not value <= superset.get(key): - return False - else: - if not value == superset.get(key): - return False + if not issubset(value, superset_value): + return False return True @@ -212,7 +185,7 @@ def __init__(self, module): self.previous = dict() self.proposed = dict() self.sent = dict() - self.stdout = None + self.stdout = "" # debug output self.has_modified = False @@ -266,8 +239,13 @@ def request( if file is not None: info = self.connection.send_file_request(method, uri, file, data, None, file_key, file_ext) else: +<<<<<<< HEAD if data: info = self.connection.send_request(method, uri, json.dumps(data)) +======= + if data is not None: + info = conn.send_request(method, uri, json.dumps(data)) +>>>>>>> 7c967c3 ([minor_change] Add nd_local_user as a new network resource module for Nexus Dashboard v4.1.0 and higher.) 
else: info = self.connection.send_request(method, uri) self.result["data"] = data @@ -324,6 +302,8 @@ def request( self.fail_json(msg="ND Error: {0}".format(self.error.get("message")), data=data, info=info) self.error = payload if "code" in payload: + if self.status == 404 and ignore_not_found_error: + return {} self.fail_json(msg="ND Error {code}: {message}".format(**payload), data=data, info=info, payload=payload) elif "messages" in payload and len(payload.get("messages")) > 0: self.fail_json(msg="ND Error {code} ({severity}): {message}".format(**payload["messages"][0]), data=data, info=info, payload=payload) @@ -520,30 +500,27 @@ def get_diff(self, unwanted=None): if not self.existing and self.sent: return True - existing = self.existing - sent = self.sent + existing = deepcopy(self.existing) + sent = deepcopy(self.sent) for key in unwanted: if isinstance(key, str): if key in existing: - try: - del existing[key] - except KeyError: - pass - try: - del sent[key] - except KeyError: - pass + del existing[key] + if key in sent: + del sent[key] elif isinstance(key, list): key_path, last = key[:-1], key[-1] try: existing_parent = reduce(dict.get, key_path, existing) - del existing_parent[last] + if existing_parent is not None: + del existing_parent[last] except KeyError: pass try: sent_parent = reduce(dict.get, key_path, sent) - del sent_parent[last] + if sent_parent is not None: + del sent_parent[last] except KeyError: pass return not issubset(sent, existing) diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py new file mode 100644 index 00000000..1cf86756 --- /dev/null +++ b/plugins/module_utils/nd_config_collection.py @@ -0,0 +1,295 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2025, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import sys +from 
from collections.abc import MutableMapping
from copy import deepcopy
from functools import reduce


# NOTE: Single-index hybrid collection for the ND network resource modules:
# an ordered list of config dicts plus an identifier -> dict lookup map.
class NDConfigCollection(MutableMapping):
    """Ordered mapping of configuration dicts keyed by their identifier(s).

    Two keying modes:
    - Priority mode (default): the first key from ``identifier_keys`` that is
      present in a config dict is used as the map key.
    - Composite mode (``use_composite_keys=True``): a tuple of ALL values for
      ``identifier_keys`` is used; every key must be present and non-None.
    """

    def __init__(self, identifier_keys, data=None, use_composite_keys=False):
        """Initialize the collection and optionally load ``data``.

        :param identifier_keys: Ordered key names used to identify a config. :type identifier_keys: list
        :param data: Optional iterable of config dicts to preload. :type data: list | None
        :param use_composite_keys: Use a tuple of all keys instead of the first match. :type use_composite_keys: bool
        """
        self.identifier_keys = identifier_keys
        self.use_composite_keys = use_composite_keys

        # Dual storage: insertion-ordered list + identifier lookup map.
        self._list = []
        self._map = {}

        if data:
            for item in data:
                self.add(item)

    # TODO: add a method to get nested keys, ex: get("spec", {}).get("onboardUrl")
    def _get_identifier_value(self, config):
        """Return the internal map key for ``config`` or None when keys are missing."""
        if self.use_composite_keys:
            # Composite mode: tuple of ALL identifier values (all required).
            values = []
            for key in self.identifier_keys:
                value = config.get(key)
                if value is None:
                    return None  # Missing a required part of the composite key.
                values.append(value)
            return tuple(values)
        # Priority mode: first identifier key present in the config wins.
        for key in self.identifier_keys:
            if key in config:
                return config[key]
        return None

    # Magic Methods
    def __getitem__(self, key):
        return self._map[key]

    def __setitem__(self, key, value):
        if key in self._map:
            old_ref = self._map[key]
            try:
                idx = self._list.index(old_ref)
            except ValueError:
                # Bugfix: if list and map ever fall out of sync, re-append so
                # both stores end up holding the new value (previously the
                # update was silently dropped, leaving inconsistent state).
                self._list.append(value)
            else:
                self._list[idx] = value
            self._map[key] = value
        else:
            # Add new entry to both stores.
            self._list.append(value)
            self._map[key] = value

    def __delitem__(self, key):
        if key in self._map:
            obj_ref = self._map[key]
            del self._map[key]
            self._list.remove(obj_ref)
        else:
            raise KeyError(key)

    def __iter__(self):
        return iter(self._map)

    def __len__(self):
        return len(self._list)

    def __eq__(self, other):
        # Allow comparison against another collection, a plain list or a dict.
        if isinstance(other, NDConfigCollection):
            return self._list == other._list
        elif isinstance(other, list):
            return self._list == other
        elif isinstance(other, dict):
            return self._map == other
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return str(self._list)

    # Helper Methods
    def _filter_dict(self, data, ignore_keys):
        """Return a shallow copy of ``data`` without the keys in ``ignore_keys``."""
        return {k: v for k, v in data.items() if k not in ignore_keys}

    def _issubset(self, subset, superset):
        """Recursively check dict containment; None values in ``subset`` are ignored."""
        if type(subset) is not type(superset):
            return False

        if not isinstance(subset, dict):
            if isinstance(subset, list):
                return all(item in superset for item in subset)
            return subset == superset

        for key, value in subset.items():
            if value is None:
                continue

            if key not in superset:
                return False

            if not self._issubset(value, superset.get(key)):
                return False
        return True

    def _remove_unwanted_keys(self, data, unwanted_keys):
        """Delete keys from ``data`` in place; list entries are nested key paths."""
        for key in unwanted_keys:
            if isinstance(key, str):
                if key in data:
                    del data[key]
            elif isinstance(key, list) and len(key) > 0:
                key_path, last = key[:-1], key[-1]
                try:
                    parent = reduce(lambda d, k: d.get(k) if isinstance(d, dict) else None, key_path, data)
                    if isinstance(parent, dict) and last in parent:
                        del parent[last]
                except (KeyError, TypeError):
                    pass
        return data

    # Core Operations
    def to_list(self):
        """Return the ordered list of config dicts (live references)."""
        return self._list

    def to_dict(self):
        """Return the identifier -> config map (live references)."""
        return self._map

    def copy(self):
        """Return a deep copy of the collection with the same keying mode."""
        return NDConfigCollection(self.identifier_keys, deepcopy(self._list), self.use_composite_keys)

    def add(self, config):
        """Add ``config``; an existing entry with the same identifier is replaced.

        :raises ValueError: When the config lacks the required identifier key(s).
        """
        ident = self._get_identifier_value(config)
        if ident is None:
            mode = "Composite" if self.use_composite_keys else "Priority"
            raise ValueError("[{0} Mode] Config missing required keys: {1}".format(mode, self.identifier_keys))

        if ident in self._map:
            self.__setitem__(ident, config)
        else:
            self._list.append(config)
            self._map[ident] = config

    def merge(self, new_config):
        """Shallow-merge ``new_config`` into the existing entry, or add it."""
        ident = self._get_identifier_value(new_config)
        if ident and ident in self._map:
            self._map[ident].update(new_config)
        else:
            self.add(new_config)

    def replace(self, new_config):
        """Replace the entry matching ``new_config``'s identifier, or add it."""
        ident = self._get_identifier_value(new_config)
        if ident:
            self[ident] = new_config
        else:
            self.add(new_config)

    def remove(self, identifiers):
        """Remove entries matching ``identifiers`` (map lookup, then linear scan)."""
        # Guard: an empty identifiers dict would match (and delete) everything
        # in the linear fallback below.
        if not identifiers:
            return

        # Fast path: direct map removal.
        try:
            target_key = self._get_identifier_value(identifiers)
            if target_key and target_key in self._map:
                self.__delitem__(target_key)
                return
        except Exception:
            pass

        # Fallback: linear scan on arbitrary attribute equality.
        to_remove = []
        for config in self._list:
            match = True
            for k, v in identifiers.items():
                if config.get(k) != v:
                    match = False
                    break
            if match:
                to_remove.append(self._get_identifier_value(config))

        for ident in to_remove:
            if ident in self._map:
                self.__delitem__(ident)

    def get_by_key(self, key, default=None):
        """Return the config stored under ``key`` or ``default``."""
        return self._map.get(key, default)

    def get_by_identifiers(self, identifiers, default=None):
        """Return the first config matching ``identifiers`` or ``default``."""
        # Fast path: direct map lookup.
        target_key = self._get_identifier_value(identifiers)
        if target_key and target_key in self._map:
            return self._map[target_key]

        # Fallback: linear scan restricted to known identifier keys.
        valid_search_keys = [k for k in identifiers if k in self.identifier_keys]
        if not valid_search_keys:
            return default

        for config in self._list:
            match = True
            for k in valid_search_keys:
                if config.get(k) != identifiers[k]:
                    match = False
                    break
            if match:
                return config
        return default

    # Backward-compatible alias for the original (misspelled) public name.
    get_by_idenfiers = get_by_identifiers

    # Diff logic
    def get_diff_config(self, new_config, unwanted_keys=None):
        """Classify ``new_config`` against the stored entry.

        :return: "new" (no stored entry), "no_diff" (contained in the stored
            entry after stripping ``unwanted_keys``) or "changed". :rtype: str
        """
        unwanted_keys = unwanted_keys or []

        ident = self._get_identifier_value(new_config)

        if not ident or ident not in self._map:
            return "new"

        existing = deepcopy(self._map[ident])
        sent = deepcopy(new_config)

        self._remove_unwanted_keys(existing, unwanted_keys)
        self._remove_unwanted_keys(sent, unwanted_keys)

        return "no_diff" if self._issubset(sent, existing) else "changed"

    def get_diff_collection(self, new_collection, unwanted_keys=None):
        """Return True when ``new_collection`` differs from this collection.

        :raises TypeError: When ``new_collection`` is not an NDConfigCollection.
        """
        if not isinstance(new_collection, NDConfigCollection):
            raise TypeError("Argument must be an NDConfigCollection")

        if len(self) != len(new_collection):
            return True

        for item in new_collection.to_list():
            if self.get_diff_config(item, unwanted_keys) != "no_diff":
                return True

        for ident in self._map:
            if ident not in new_collection._map:
                return True

        return False

    def get_diff_identifiers(self, new_collection):
        """Return identifiers present here but absent from ``new_collection``.

        Bugfix: the original referenced a non-existent ``config_collection``
        attribute, so every call raised AttributeError.
        """
        return list(set(self._map) - set(new_collection._map))

    # Sanitize Operations
    def sanitize(self, keys_to_remove=None, values_to_remove=None, remove_none_values=False):
        """Recursively strip unwanted keys/values from every stored config (in place)."""
        keys_to_remove = keys_to_remove or []
        values_to_remove = values_to_remove or []

        def recursive_clean(obj):
            if isinstance(obj, dict):
                for k in list(obj.keys()):
                    v = obj[k]
                    if k in keys_to_remove or v in values_to_remove or (remove_none_values and v is None):
                        del obj[k]
                        continue
                    if isinstance(v, (dict, list)):
                        recursive_clean(v)
            elif isinstance(obj, list):
                for item in obj:
                    recursive_clean(item)

        for item in self._list:
            recursive_clean(item)
# TODO: Make further enhancements to logs and outputs
# NOTE: ONLY works with the new API endpoints introduced in ND v4.1.0 and later.
class NDNetworkResourceModule(NDModule):
    """Generic CRUD engine for ND network resource modules.

    Drives the merged/replaced/overridden/deleted state machine over a single
    REST collection endpoint, tracking before/after state in
    NDConfigCollection instances and accumulating per-object debug logs.
    """

    def __init__(self, module, path, identifier_keys, use_composite_keys=False, actions_overwrite_map=None):
        """Query the current objects and set up the result collections.

        :param module: The AnsibleModule instance. :type module: AnsibleModule
        :param path: REST collection path for the resource. :type path: str
        :param identifier_keys: Payload key(s) identifying an object. :type identifier_keys: list
        :param use_composite_keys: Key on a tuple of all identifier keys. :type use_composite_keys: bool
        :param actions_overwrite_map: Optional per-action overrides keyed by
            "create"/"update"/"delete"/"query_all". :type actions_overwrite_map: dict | None
        """
        super().__init__(module)

        # Initial variables
        self.path = path
        self.actions_overwrite_map = actions_overwrite_map or {}
        self.identifier_keys = identifier_keys
        self.use_composite_keys = use_composite_keys

        # Initial data
        self.init_all_data = self._query_all()

        # Info output
        self.existing = NDConfigCollection(identifier_keys, data=self.init_all_data)
        self.previous = NDConfigCollection(identifier_keys)
        self.proposed = NDConfigCollection(identifier_keys)
        self.sent = NDConfigCollection(identifier_keys)

        # Debug output
        self.nd_logs = []

        # Helper variables tracking the object currently being processed.
        self.current_identifier = ""
        self.existing_config = {}
        self.proposed_config = {}

    # Actions Operations
    def actions_overwrite(action):  # pylint: disable=no-self-argument
        """Class-body decorator: route an action through actions_overwrite_map
        when the concrete module supplies its own implementation."""

        def decorator(func):
            def wrapper(self, *args, **kwargs):
                overwrite_action = self.actions_overwrite_map.get(action)
                if callable(overwrite_action):
                    return overwrite_action(self)
                return func(self, *args, **kwargs)

            return wrapper

        return decorator

    @actions_overwrite("create")
    def _create(self):
        # POST the proposed payload to the collection path (skipped in check mode).
        if not self.module.check_mode:
            return self.request(path=self.path, method="POST", data=self.proposed_config)

    @actions_overwrite("update")
    def _update(self):
        # PUT the proposed payload to the object path (skipped in check mode).
        if not self.module.check_mode:
            object_path = "{0}/{1}".format(self.path, self.current_identifier)
            return self.request(path=object_path, method="PUT", data=self.proposed_config)

    @actions_overwrite("delete")
    def _delete(self):
        # DELETE the object path (skipped in check mode).
        if not self.module.check_mode:
            object_path = "{0}/{1}".format(self.path, self.current_identifier)
            self.request(path=object_path, method="DELETE")

    @actions_overwrite("query_all")
    def _query_all(self):
        # Fetch every existing object from the collection endpoint.
        return self.query_obj(self.path)

    def format_log(self, identifier, status, after_data, sent_payload_data=None):
        """Append one per-object debug record to ``nd_logs``.

        :param identifier: Identifier of the processed object.
        :param status: Action outcome: "created"/"updated"/"deleted"/"no_change". :type status: str
        :param after_data: Object state after the action (falls back to the
            pre-action state when None).
        :param sent_payload_data: Payload actually sent, if any.
        """
        item_result = {
            "identifier": identifier,
            "status": status,
            "before": self.existing_config,
            "after": deepcopy(after_data) if after_data is not None else self.existing_config,
            "sent_payload": deepcopy(sent_payload_data) if sent_payload_data is not None else {},
        }

        if not self.module.check_mode and self.url is not None:
            # Bugfix: the HTTP status code previously reused the "status" key
            # and clobbered the action outcome recorded above.
            item_result.update(
                {
                    "method": self.method,
                    "response": self.response,
                    "http_status": self.status,
                    "url": self.url,
                }
            )

        self.nd_logs.append(item_result)

    # Logs and Outputs formatting Operations
    def add_logs_and_ouputs(self):
        """Populate ``self.result`` (changed flag, previous/sent/proposed/current,
        debug logs) according to state and output_level.

        NOTE(review): method name keeps the original "ouputs" spelling for
        backward compatibility.
        """
        if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED:
            if self.params.get("output_level") in ("debug", "info"):
                self.result["previous"] = self.previous.to_list()
            # A collection-level diff marks the task changed even when no
            # request was issued (e.g. check mode).
            if not self.has_modified and self.previous.get_diff_collection(self.existing):
                self.result["changed"] = True
            if self.stdout:
                self.result["stdout"] = self.stdout

        if self.params.get("output_level") == "debug":
            self.result["nd_logs"] = self.nd_logs
            if self.url is not None:
                self.result["httpapi_logs"] = self.httpapi_logs

            if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED:
                self.result["sent"] = self.sent.to_list()
                self.result["proposed"] = self.proposed.to_list()

        self.result["current"] = self.existing.to_list()

    # Manage State Operations
    def manage_state(self, state, new_configs, unwanted_keys=None, override_exceptions=None):
        """Apply ``new_configs`` to the device according to ``state``.

        :param state: One of "merged", "replaced", "overridden", "deleted". :type state: str
        :param new_configs: Payload dicts built by the concrete module. :type new_configs: list
        :param unwanted_keys: Keys/key-paths ignored when diffing. :type unwanted_keys: list | None
        :param override_exceptions: Identifiers never deleted by overridden. :type override_exceptions: list | None
        """
        unwanted_keys = unwanted_keys or []
        override_exceptions = override_exceptions or []

        self.proposed = NDConfigCollection(self.identifier_keys, data=new_configs)
        self.proposed.sanitize()
        self.previous = self.existing.copy()

        # Bugfix: "overridden" was misspelled ("overidden") in both checks
        # below, so the overridden state silently did nothing.
        if state in ("merged", "replaced", "overridden"):
            for identifier, config in self.proposed.items():
                diff_config_info = self.existing.get_diff_config(config, unwanted_keys)
                self.current_identifier = identifier
                self.existing_config = deepcopy(self.existing.get_by_key(identifier, {}))
                self.proposed_config = config
                request_response = None
                sent_payload = None
                status = "no_change"

                if diff_config_info != "no_diff":
                    if state == "merged":
                        # Merge keeps unspecified attributes from the device.
                        self.existing.merge(config)
                        self.proposed_config = self.existing[identifier]
                    else:
                        self.existing.replace(config)

                    if diff_config_info == "changed":
                        request_response = self._update()
                        status = "updated"
                    else:
                        request_response = self._create()
                        status = "created"

                    if not self.module.check_mode:
                        self.sent.add(self.proposed_config)
                        sent_payload = self.proposed_config
                    else:
                        # Check mode: report the would-be payload as the result.
                        request_response = self.proposed_config

                self.format_log(identifier, status, request_response, sent_payload)

            if state == "overridden":
                # Delete everything on the device that is not in the playbook,
                # except explicitly protected identifiers.
                diff_identifiers = self.previous.get_diff_identifiers(self.proposed)
                for identifier in diff_identifiers:
                    if identifier not in override_exceptions:
                        self.current_identifier = identifier
                        self.existing_config = deepcopy(self.existing.get_by_key(identifier, {}))
                        self._delete()
                        del self.existing[identifier]
                        self.format_log(identifier, "deleted", after_data={})

        elif state == "deleted":
            for identifier, config in self.proposed.items():
                if identifier in self.existing.keys():
                    self.current_identifier = identifier
                    self.existing_config = deepcopy(self.existing.get_by_key(identifier, {}))
                    self.proposed_config = config
                    self._delete()
                    del self.existing[identifier]
                    self.format_log(identifier, "deleted", after_data={})

    # Outputs Operations
    def fail_json(self, msg, **kwargs):
        """Fail the task, attaching the accumulated result data."""
        self.add_logs_and_ouputs()

        self.result.update(**kwargs)
        self.module.fail_json(msg=msg, **self.result)

    def exit_json(self, **kwargs):
        """Exit the task, attaching result data and --diff output when changed."""
        self.add_logs_and_ouputs()

        if self.module._diff and self.result.get("changed") is True:
            self.result["diff"] = dict(
                before=self.previous.to_list(),
                after=self.existing.to_list(),
            )

        self.result.update(**kwargs)
        self.module.exit_json(**self.result)
from copy import deepcopy


def sanitize_dict(dict_to_sanitize, keys=None, values=None, recursive=True, remove_none_values=True):
    """Return a deep-copied dict with unwanted keys and values stripped.

    :param dict_to_sanitize: Dict to clean (never modified). :type dict_to_sanitize: dict
    :param keys: Keys whose entries are dropped. :type keys: list | None
    :param values: Values whose entries are dropped. :type values: list | None
    :param recursive: Also sanitize nested dicts and dicts inside lists. :type recursive: bool
    :param remove_none_values: Drop entries whose value is None. :type remove_none_values: bool
    :return: Sanitized deep copy of ``dict_to_sanitize``. :rtype: dict
    """
    if keys is None:
        keys = []
    if values is None:
        values = []

    result = deepcopy(dict_to_sanitize)
    for key, value in dict_to_sanitize.items():
        if key in keys:
            del result[key]
        elif value in values or (value is None and remove_none_values):
            del result[key]
        elif isinstance(value, dict) and recursive:
            # Bugfix: propagate recursive/remove_none_values to nested calls;
            # they previously reverted to their defaults on recursion.
            result[key] = sanitize_dict(value, keys, values, recursive, remove_none_values)
        elif isinstance(value, list) and recursive:
            for index, item in enumerate(value):
                if isinstance(item, dict):
                    result[key][index] = sanitize_dict(item, keys, values, recursive, remove_none_values)
    return result
+description: +- Manage local users on Cisco Nexus Dashboard (ND). +- It supports creating, updating, querying, and deleting local users. +author: +- Gaspard Micol (@gmicol) +options: + config: + description: + - The list of the local users to configure. + type: list + elements: dict + suboptions: + email: + description: + - The email address of the local user. + type: str + login_id: + description: + - The login ID of the local user. + - The O(config.login_id) must be defined when creating, updating or deleting a local user. + type: str + required: true + first_name: + description: + - The first name of the local user. + type: str + last_name: + description: + - The last name of the local user. + type: str + user_password: + description: + - The password of the local user. + - Password must have a minimum of 8 characters to a maximum of 64 characters. + - Password must have three of the following; one number, one lower case character, one upper case character, one special character. + - The O(config.user_password) must be defined when creating a new local_user. + type: str + reuse_limitation: + description: + - The number of different passwords a user must use before they can reuse a previous one. + - It defaults to C(0) when unset during creation. + type: int + time_interval_limitation: + description: + - The minimum time period that must pass before a previous password can be reused. + - It defaults to C(0) when unset during creation. + type: int + security_domains: + description: + - The list of Security Domains and Roles for the local user. + - At least, one Security Domain must be defined when creating a new local user. + type: list + elements: dict + suboptions: + name: + description: + - The name of the Security Domain to which the local user is given access. + type: str + required: true + aliases: [ security_domain_name, domain_name ] + roles: + description: + - The Permission Roles of the local user within the Security Domain. 
+ type: list + elements: str + choices: [ fabric_admin, observer, super_admin, support_engineer, approver, designer ] + aliases: [ domains ] + remote_id_claim: + description: + - The remote ID claim of the local user. + type: str + remote_user_authorization: + description: + - To enable/disable the Remote User Authorization of the local user. + - Remote User Authorization is used for signing into Nexus Dashboard when using identity providers that cannot provide authorization claims. + Once this attribute is enabled, the local user ID cannot be used to directly login to Nexus Dashboard. + - It defaults to C(false) when unset during creation. + type: bool + state: + description: + - The desired state of the network resources on the Cisco Nexus Dashboard. + - Use O(state=merged) to create new resources and updates existing ones as defined in your configuration. + Resources on ND that are not specified in the configuration will be left unchanged. + - Use O(state=replaced) to replace the resources specified in the configuration. + - Use O(state=overridden) to enforce the configuration as the single source of truth. + The resources on ND will be modified to exactly match the configuration. + Any resource existing on ND but not present in the configuration will be deleted. Use with extra caution. + - Use O(state=deleted) to remove the resources specified in the configuration from the Cisco Nexus Dashboard. + type: str + default: merged + choices: [ merged, replaced, overridden, deleted ] +extends_documentation_fragment: +- cisco.nd.modules +- cisco.nd.check_mode +notes: +- This module is only supported on Nexus Dashboard having version 4.1.0 or higher. +- This module is not idempotent when creating or updating a local user object when O(config.user_password) is used. 
+""" + +EXAMPLES = r""" +- name: Create a new local user + cisco.nd.nd_local_user: + config: + - email: user@example.com + login_id: local_user + first_name: User first name + last_name: User last name + user_password: localUserPassword1% + reuse_limitation: 20 + time_interval_limitation: 10 + security_domains: + name: all + roles: + - observer + - support_engineer + remote_id_claim: remote_user + remote_user_authorization: true + state: merged + register: result + +- name: Create local user with minimal configuration + cisco.nd.nd_local_user: + config: + - login_id: local_user_min + user_password: localUserMinuser_password + security_domain: all + state: merged + +- name: Update local user + cisco.nd.nd_local_user: + config: + - email: udpateduser@example.com + login_id: local_user + first_name: Updated user first name + last_name: Updated user last name + user_password: updatedLocalUserPassword1% + reuse_limitation: 25 + time_interval_limitation: 15 + security_domains: + - name: all + roles: super_admin + - name: ansible_domain + roles: observer + roles: super_admin + remote_id_claim: "" + remote_user_authorization: false + state: replaced + +- name: Delete a local user + cisco.nd.nd_local_user: + config: + - login_id: local_user + state: deleted +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec, NDModule +from ansible_collections.cisco.nd.plugins.module_utils.nd_network_resources import NDNetworkResourceModule +from ansible_collections.cisco.nd.plugins.module_utils.constants import USER_ROLES_MAPPING + + +# Actions overwrite functions +def quey_all_local_users(nd): + return nd.query_obj(nd.path).get("localusers") + + +def main(): + argument_spec = nd_argument_spec() + argument_spec.update( + config=dict( + type="list", + elements="dict", + options=dict( + email=dict(type="str"), + login_id=dict(type="str", required=True), + 
first_name=dict(type="str"), + last_name=dict(type="str"), + user_password=dict(type="str", no_log=True), + reuse_limitation=dict(type="int"), + time_interval_limitation=dict(type="int"), + security_domains=dict( + type="list", + elements="dict", + options=dict( + name=dict(type="str", required=True, aliases=["security_domain_name", "domain_name"]), + roles=dict(type="list", elements="str", choices=list(USER_ROLES_MAPPING)), + ), + aliases=["domains"], + ), + remote_id_claim=dict(type="str"), + remote_user_authorization=dict(type="bool"), + ), + ), + override_exceptions=dict(type="list", elements="str"), + state=dict(type="str", default="merged", choices=["merged", "replaced", "overridden", "deleted"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + path = "/api/v1/infra/aaa/localUsers" + identifier_keys = ["loginID"] + actions_overwrite_map = {"query_all": quey_all_local_users} + + nd = NDNetworkResourceModule(module, path, identifier_keys, actions_overwrite_map=actions_overwrite_map) + + state = nd.params.get("state") + config = nd.params.get("config") + override_exceptions = nd.params.get("override_exceptions") + new_config = [] + for object in config: + payload = { + "email": object.get("email"), + "firstName": object.get("first_name"), + "lastName": object.get("last_name"), + "loginID": object.get("login_id"), + "password": object.get("user_password"), + "remoteIDClaim": object.get("remote_id_claim"), + "xLaunch": object.get("remote_user_authorization"), + } + + if object.get("security_domains"): + payload["rbac"] = { + "domains": { + security_domain.get("name"): { + "roles": ( + [USER_ROLES_MAPPING.get(role) for role in security_domain["roles"]] if isinstance(security_domain.get("roles"), list) else [] + ) + } + for security_domain in object["security_domains"] + }, + } + if object.get("reuse_limitation") or object.get("time_interval_limitation"): + payload["passwordPolicy"] = { + "reuseLimitation": 
object.get("reuse_limitation"), + "timeIntervalLimitation": object.get("time_interval_limitation"), + } + new_config.append(payload) + + nd.manage_state(state=state, new_configs=new_config, unwanted_keys=[["passwordPolicy", "passwordChangeTime"], ["userID"]], override_exceptions=override_exceptions) + + nd.exit_json() + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/nd_local_user/tasks/main.yml b/tests/integration/targets/nd_local_user/tasks/main.yml new file mode 100644 index 00000000..77e55cd1 --- /dev/null +++ b/tests/integration/targets/nd_local_user/tasks/main.yml @@ -0,0 +1,134 @@ +# Test code for the ND modules +# Copyright: (c) 2025, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Test that we have a Nexus Dashboard host, username and password + ansible.builtin.fail: + msg: 'Please define the following variables: ansible_host, ansible_user and ansible_password.' + when: ansible_host is not defined or ansible_user is not defined or ansible_password is not defined + +- name: Set vars + ansible.builtin.set_fact: + nd_info: &nd_info + output_level: '{{ api_key_output_level | default("debug") }}' + +- name: Ensure local users do not exist before test starts + cisco.nd.nd_local_user: + <<: *nd_info + config: + - login_id: ansible_local_user + - login_id: ansible_local_user_2 + state: deleted + +# CREATE +- name: Create local users with full and minimum configuration (check mode) + cisco.nd.nd_local_user: &create_local_user + <<: *nd_info + config: + - email: ansibleuser@example.com + login_id: ansible_local_user + first_name: Ansible first name + last_name: Ansible last name + user_password: ansibleLocalUserPassword1%Test + reuse_limitation: 20 + time_interval_limitation: 10 + security_domains: + - name: all + roles: + - observer + - support_engineer + remote_id_claim: ansible_remote_user + remote_user_authorization: true + - login_id: 
ansible_local_user_2 + user_password: ansibleLocalUser2Password1%Test + security_domains: + - name: all + state: merged + check_mode: true + register: cm_create_local_user + +- name: Create local users with full and minimum configuration (normal mode) + cisco.nd.nd_local_user: + <<: *create_local_user + register: nm_create_local_user + +# UPDATE +- name: Update all ansible_local_user's attributes (check mode) + cisco.nd.nd_local_user: &update_first_local_user + <<: *nd_info + config: + - email: updatedansibleuser@example.com + login_id: ansible_local_user + first_name: Updated Ansible first name + last_name: Updated Ansible last name + user_password: updatedAnsibleLocalUserPassword1% + reuse_limitation: 25 + time_interval_limitation: 15 + security_domains: + - name: all + roles: super_admin + remote_id_claim: "" + remote_user_authorization: false + state: replaced + check_mode: true + register: cm_update_local_user + +- name: Update local user (normal mode) + cisco.nd.nd_local_user: + <<: *update_first_local_user + register: nm_update_local_user + +- name: Update all ansible_local_user_2's attributes except password + cisco.nd.nd_local_user: &update_second_local_user + <<: *nd_info + config: + - email: secondansibleuser@example.com + login_id: ansible_local_user_2 + first_name: Second Ansible first name + last_name: Second Ansible last name + reuse_limitation: 20 + time_interval_limitation: 10 + security_domains: + - name: all + roles: fabric_admin + remote_id_claim: ansible_remote_user_2 + remote_user_authorization: true + state: merged + register: nm_update_local_user_2 + +- name: Update all ansible_local_user_2's attributes except password again (idempotency) + cisco.nd.nd_local_user: + <<: *update_second_local_user + register: nm_update_local_user_2_again + + +# DELETE +- name: Delete local user by name (check mode) + cisco.nd.nd_local_user: &delete_local_user + <<: *nd_info + config: + - login_id: ansible_local_user + state: deleted + check_mode: true + 
register: cm_delete_local_user + +- name: Delete local user by name (normal mode) + cisco.nd.nd_local_user: + <<: *delete_local_user + register: nm_delete_local_user + +- name: Delete local user again (idempotency test) + cisco.nd.nd_local_user: + <<: *delete_local_user + register: nm_delete_local_user_again + + +# CLEAN UP +- name: Ensure local users do not exist + cisco.nd.nd_local_user: + <<: *nd_info + config: + - login_id: ansible_local_user + - login_id: ansible_local_user_2 + state: deleted From c70a93f06f4a481879a0ffc74e0dfdce24a680c7 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 15 Jan 2026 11:47:32 -0500 Subject: [PATCH 026/109] [ignore] First Pydantic implementation: Add Pydantic Models for nd_local_user. --- .../module_utils/models/local_user_model.py | 142 ++++++++++++++++++ plugins/module_utils/nd_config_collection.py | 1 + plugins/module_utils/nd_network_resources.py | 2 + plugins/modules/nd_local_user.py | 5 +- 4 files changed, 148 insertions(+), 2 deletions(-) create mode 100644 plugins/module_utils/models/local_user_model.py diff --git a/plugins/module_utils/models/local_user_model.py b/plugins/module_utils/models/local_user_model.py new file mode 100644 index 00000000..f8de1f46 --- /dev/null +++ b/plugins/module_utils/models/local_user_model.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2025, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import json +from typing import List, Dict, Any, Optional +from pydantic import BaseModel, ConfigDict, Field, field_validator + +# TODO: Add Field validation methods +# TODO: Add a method to get identifier(s) -> define a generic NDNetworkResourceModel +# TODO: Maybe define our own baseModel +# TODO: Look at ansible aliases +from pydantic import BaseModel, Field, ConfigDict +from typing import List, Dict, Any, Optional 
+ +class SecurityDomainModel(BaseModel): + model_config = ConfigDict( + str_strip_whitespace=True, + use_enum_values=True, + validate_assignment=True, + populate_by_name=True, + ) + + name: str = Field(alias="name") + roles: list[str] = Field(default_factory=lambda: ["observer"], alias="roles") + + +class LocalUserModel(BaseModel): + model_config = ConfigDict( + str_strip_whitespace=True, + use_enum_values=True, + validate_assignment=True, + populate_by_name=True, + ) + + email: str = Field(default="", alias="email") + login_id: str = Field(alias="loginID") + first_name: str = Field(default="", alias="firstName") + last_name: str = Field(default="", alias="lastName") + user_password: str = Field(alias="password") + reuse_limitation: int = Field(default=0, alias="reuseLimitation") + time_interval_limitation: int = Field(default=0, alias="timeIntervalLimitation") + security_domains: List[SecurityDomainModel] = Field(default_factory=list, alias="domains") + remote_id_claim: str = Field(default="", alias="remoteIDClaim") + remote_user_authorization: bool = Field(default=False, alias="xLaunch") + + def to_api_payload(self, user_roles_mapping: Dict[str, str] = None) -> Dict[str, Any]: + """Convert the model to the specific API payload format required.""" + if user_roles_mapping is None: + user_roles_mapping = {} + + base_data = self.model_dump(by_alias=True, exclude={'domains', 'reuseLimitation', 'timeIntervalLimitation'}) + + payload = { + "email": base_data.get("email"), + "firstName": base_data.get("firstName"), + "lastName": base_data.get("lastName"), + "loginID": base_data.get("loginID"), + "password": base_data.get("password"), + "remoteIDClaim": base_data.get("remoteIDClaim"), + "xLaunch": base_data.get("xLaunch"), + } + + if self.security_domains: + payload["rbac"] = { + "domains": { + domain.name: { + "roles": [ + user_roles_mapping.get(role, role) for role in domain.roles + ] + } + for domain in self.security_domains + } + } + + if self.reuse_limitation or 
self.time_interval_limitation: + payload["passwordPolicy"] = { + "reuseLimitation": self.reuse_limitation, + "timeIntervalLimitation": self.time_interval_limitation, + } + + return payload + + @classmethod + def from_api_payload( + cls, + payload: Dict[str, Any], + reverse_user_roles_mapping: Optional[Dict[str, str]] = None + ) -> 'LocalUserModel': + + if reverse_user_roles_mapping is None: + reverse_user_roles_mapping = {} + + user_data = { + "email": payload.get("email", ""), + "loginID": payload.get("loginID", ""), + "firstName": payload.get("firstName", ""), + "lastName": payload.get("lastName", ""), + "password": payload.get("password", ""), + "remoteIDClaim": payload.get("remoteIDClaim", ""), + "xLaunch": payload.get("xLaunch", False), + } + + password_policy = payload.get("passwordPolicy", {}) + user_data["reuseLimitation"] = password_policy.get("reuseLimitation", 0) + user_data["timeIntervalLimitation"] = password_policy.get("timeIntervalLimitation", 0) + + domains_data = [] + rbac = payload.get("rbac", {}) + if rbac and "domains" in rbac: + for domain_name, domain_config in rbac["domains"].items(): + # Map API roles back to internal roles + api_roles = domain_config.get("roles", []) + internal_roles = [ + reverse_user_roles_mapping.get(role, role) for role in api_roles + ] + + domain_data = { + "name": domain_name, + "roles": internal_roles + } + domains_data.append(domain_data) + + user_data["domains"] = domains_data + + return cls(**user_data) + + # @classmethod + # def from_api_payload_json( + # cls, + # json_payload: str, + # reverse_user_roles_mapping: Optional[Dict[str, str]] = None + # ) -> 'LocalUserModel': + + # payload = json.loads(json_payload) + # return cls.from_api_payload(payload, reverse_user_roles_mapping) diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 1cf86756..8f0058bb 100644 --- a/plugins/module_utils/nd_config_collection.py +++ 
b/plugins/module_utils/nd_config_collection.py @@ -20,6 +20,7 @@ from collections import MutableMapping iteritems = lambda d: d.iteritems() +# TODO: Adapt to Pydantic Models # NOTE: Single-Index Hybrid Collection for ND Network Resource Module class NDConfigCollection(MutableMapping): diff --git a/plugins/module_utils/nd_network_resources.py b/plugins/module_utils/nd_network_resources.py index b73b24e7..3b549da1 100644 --- a/plugins/module_utils/nd_network_resources.py +++ b/plugins/module_utils/nd_network_resources.py @@ -14,6 +14,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED # TODO: Make further enhancement to logs and outputs +# TODO: Adapt to Pydantic Models # NOTE: ONLY works for new API endpoints introduced in ND v4.1.0 and later class NDNetworkResourceModule(NDModule): @@ -98,6 +99,7 @@ def format_log(self, identifier, status, after_data, sent_payload_data=None): self.nd_logs.append(item_result) # Logs and Outputs formating Operations + # TODO: Move it to different file def add_logs_and_ouputs(self): if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: if self.params.get("output_level") in ("debug", "info"): diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 552df3b7..4a5f1ad2 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -181,10 +181,11 @@ # Actions overwrite functions -def quey_all_local_users(nd): +def query_all_local_users(nd): return nd.query_obj(nd.path).get("localusers") +# TODO: Adapt to Pydantic Model def main(): argument_spec = nd_argument_spec() argument_spec.update( @@ -223,7 +224,7 @@ def main(): path = "/api/v1/infra/aaa/localUsers" identifier_keys = ["loginID"] - actions_overwrite_map = {"query_all": quey_all_local_users} + actions_overwrite_map = {"query_all": query_all_local_users} nd = NDNetworkResourceModule(module, path, identifier_keys, 
actions_overwrite_map=actions_overwrite_map) From 267e2a84deb552978a5ec1ca5b0c0f595a657419 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 20 Jan 2026 13:17:35 -0500 Subject: [PATCH 027/109] [ignore] Second Pydantic Implementation: Create a NDBaseModel to be inherited from future class models. Modify class models for local_user. --- plugins/module_utils/models/base.py | 57 +++++++ plugins/module_utils/models/local_user.py | 116 ++++++++++++++ .../module_utils/models/local_user_model.py | 142 ------------------ 3 files changed, 173 insertions(+), 142 deletions(-) create mode 100644 plugins/module_utils/models/base.py create mode 100644 plugins/module_utils/models/local_user.py delete mode 100644 plugins/module_utils/models/local_user_model.py diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py new file mode 100644 index 00000000..e7301d14 --- /dev/null +++ b/plugins/module_utils/models/base.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2025, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from abc import ABC, abstractmethod +from pydantic import BaseModel, ConfigDict +from typing import List, Dict, Any, Optional, ClassVar + + +class NDBaseModel(BaseModel, ABC): + + model_config = ConfigDict( + str_strip_whitespace=True, + use_enum_values=True, + validate_assignment=True, + populate_by_name=True, + ) + + # TODO: find ways to redifine these var in every + identifiers: ClassVar[List[str]] = [] + use_composite_identifiers: ClassVar[bool] = False + + @abstractmethod + def to_payload(self) -> Dict[str, Any]: + pass + + @classmethod + @abstractmethod + def from_response(cls, response: Dict[str, Any]) -> 'NDBaseModel': + pass + + # TODO: Modify to make it more generic and Pydantic + # TODO: add a method to get nested keys, ex: get("spec", 
{}).get("onboardUrl") + def get_identifier_value(self) -> Any: + """Generates the internal map key based on the selected mode.""" + # if self.use_composite_keys: + # # Mode: Composite (Tuple of ALL keys) + # values = [] + # for key in self.identifier_keys: + # val = config.get(key) + # if val is None: + # return None # Missing a required part + # values.append(val) + # return tuple(values) + # else: + # # Mode: Priority (First available key) + # for key in self.identifier_keys: + # if key in config: + # return config[key] + # return None + pass diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py new file mode 100644 index 00000000..7877a5a5 --- /dev/null +++ b/plugins/module_utils/models/local_user.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2025, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from pydantic import Field, field_validator +from types import MappingProxyType +from typing import List, Dict, Any, Optional, ClassVar + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +# TODO: Add Field validation methods +# TODO: define our own Field class for string versioning, ansible aliases +# TODO: Add a method to get identifier(s) -> define a generic NDNetworkResourceModel +# TODO: Surclass BaseModel -> Priority +# TODO: Look at ansible aliases + +# TODO: use constants.py file in the future +user_roles_mapping = MappingProxyType({}) + + +class LocalUserSecurityDomainModel(NDBaseModel): + + name: str = Field(alias="name") + roles: list[str] = Field(default_factory=lambda: ["observer"], alias="roles") + + def to_payload(self) -> Dict[str, Any]: + return { + self.name: { + "roles": [ + user_roles_mapping.get(role, role) for role in self.roles + ] + } + } + + @classmethod + def from_response(cls, 
name: str, domain_config: List[str]) -> 'NDBaseModel': + internal_roles = [user_roles_mapping.get(role, role) for role in domain_config.get("roles", [])] + + domain_data = { + "name": name, + "roles": internal_roles + } + + return cls(**domain_data) + + +class LocalUserModel(NDBaseModel): + + # TODO: Define a way to generate it (look at NDBaseModel comments) + identifiers: ClassVar[List[str]] = ["login_id"] + + # TODO: Use Optinal to remove default values (get them from API response instead) + email: str = Field(default="", alias="email") + login_id: str = Field(alias="loginID") + first_name: str = Field(default="", alias="firstName") + last_name: str = Field(default="", alias="lastName") + user_password: str = Field(alias="password") + reuse_limitation: int = Field(default=0, alias="reuseLimitation") + time_interval_limitation: int = Field(default=0, alias="timeIntervalLimitation") + security_domains: List[LocalUserSecurityDomainModel] = Field(default_factory=list, alias="domains") + remote_id_claim: str = Field(default="", alias="remoteIDClaim") + remote_user_authorization: bool = Field(default=False, alias="xLaunch") + + def to_payload(self) -> Dict[str, Any]: + """Convert the model to the specific API payload format required.""" + + payload = self.model_dump(by_alias=True, exclude={'domains', 'reuseLimitation', 'timeIntervalLimitation'}) + + if self.security_domains: + payload["rbac"] = {"domains": {}} + for domain in self.security_domains: + payload["rbac"]["domains"].update(domain.to_api_payload()) + + if self.reuse_limitation or self.time_interval_limitation: + payload["passwordPolicy"] = { + "reuseLimitation": self.reuse_limitation, + "timeIntervalLimitation": self.time_interval_limitation, + } + + return payload + + @classmethod + def from_response(cls, payload: Dict[str, Any]) -> 'LocalUserModel': + + if reverse_user_roles_mapping is None: + reverse_user_roles_mapping = {} + + user_data = { + "email": payload.get("email"), + "loginID": 
payload.get("loginID"), + "firstName": payload.get("firstName"), + "lastName": payload.get("lastName"), + "password": payload.get("password"), + "remoteIDClaim": payload.get("remoteIDClaim"), + "xLaunch": payload.get("xLaunch"), + } + + password_policy = payload.get("passwordPolicy", {}) + user_data["reuseLimitation"] = password_policy.get("reuseLimitation", 0) + user_data["timeIntervalLimitation"] = password_policy.get("timeIntervalLimitation", 0) + + domains_data = [] + rbac = payload.get("rbac", {}) + if rbac and "domains" in rbac: + for domain_name, domain_config in rbac["domains"].items(): + domains_data.append(LocalUserSecurityDomainModel.from_api_response(domain_name, domain_config)) + + user_data["domains"] = domains_data + + return cls(**user_data) diff --git a/plugins/module_utils/models/local_user_model.py b/plugins/module_utils/models/local_user_model.py deleted file mode 100644 index f8de1f46..00000000 --- a/plugins/module_utils/models/local_user_model.py +++ /dev/null @@ -1,142 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2025, Gaspard Micol (@gmicol) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import json -from typing import List, Dict, Any, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator - -# TODO: Add Field validation methods -# TODO: Add a method to get identifier(s) -> define a generic NDNetworkResourceModel -# TODO: Maybe define our own baseModel -# TODO: Look at ansible aliases -from pydantic import BaseModel, Field, ConfigDict -from typing import List, Dict, Any, Optional - -class SecurityDomainModel(BaseModel): - model_config = ConfigDict( - str_strip_whitespace=True, - use_enum_values=True, - validate_assignment=True, - populate_by_name=True, - ) - - name: str = Field(alias="name") - roles: list[str] = Field(default_factory=lambda: ["observer"], alias="roles") 
- - -class LocalUserModel(BaseModel): - model_config = ConfigDict( - str_strip_whitespace=True, - use_enum_values=True, - validate_assignment=True, - populate_by_name=True, - ) - - email: str = Field(default="", alias="email") - login_id: str = Field(alias="loginID") - first_name: str = Field(default="", alias="firstName") - last_name: str = Field(default="", alias="lastName") - user_password: str = Field(alias="password") - reuse_limitation: int = Field(default=0, alias="reuseLimitation") - time_interval_limitation: int = Field(default=0, alias="timeIntervalLimitation") - security_domains: List[SecurityDomainModel] = Field(default_factory=list, alias="domains") - remote_id_claim: str = Field(default="", alias="remoteIDClaim") - remote_user_authorization: bool = Field(default=False, alias="xLaunch") - - def to_api_payload(self, user_roles_mapping: Dict[str, str] = None) -> Dict[str, Any]: - """Convert the model to the specific API payload format required.""" - if user_roles_mapping is None: - user_roles_mapping = {} - - base_data = self.model_dump(by_alias=True, exclude={'domains', 'reuseLimitation', 'timeIntervalLimitation'}) - - payload = { - "email": base_data.get("email"), - "firstName": base_data.get("firstName"), - "lastName": base_data.get("lastName"), - "loginID": base_data.get("loginID"), - "password": base_data.get("password"), - "remoteIDClaim": base_data.get("remoteIDClaim"), - "xLaunch": base_data.get("xLaunch"), - } - - if self.security_domains: - payload["rbac"] = { - "domains": { - domain.name: { - "roles": [ - user_roles_mapping.get(role, role) for role in domain.roles - ] - } - for domain in self.security_domains - } - } - - if self.reuse_limitation or self.time_interval_limitation: - payload["passwordPolicy"] = { - "reuseLimitation": self.reuse_limitation, - "timeIntervalLimitation": self.time_interval_limitation, - } - - return payload - - @classmethod - def from_api_payload( - cls, - payload: Dict[str, Any], - reverse_user_roles_mapping: 
Optional[Dict[str, str]] = None - ) -> 'LocalUserModel': - - if reverse_user_roles_mapping is None: - reverse_user_roles_mapping = {} - - user_data = { - "email": payload.get("email", ""), - "loginID": payload.get("loginID", ""), - "firstName": payload.get("firstName", ""), - "lastName": payload.get("lastName", ""), - "password": payload.get("password", ""), - "remoteIDClaim": payload.get("remoteIDClaim", ""), - "xLaunch": payload.get("xLaunch", False), - } - - password_policy = payload.get("passwordPolicy", {}) - user_data["reuseLimitation"] = password_policy.get("reuseLimitation", 0) - user_data["timeIntervalLimitation"] = password_policy.get("timeIntervalLimitation", 0) - - domains_data = [] - rbac = payload.get("rbac", {}) - if rbac and "domains" in rbac: - for domain_name, domain_config in rbac["domains"].items(): - # Map API roles back to internal roles - api_roles = domain_config.get("roles", []) - internal_roles = [ - reverse_user_roles_mapping.get(role, role) for role in api_roles - ] - - domain_data = { - "name": domain_name, - "roles": internal_roles - } - domains_data.append(domain_data) - - user_data["domains"] = domains_data - - return cls(**user_data) - - # @classmethod - # def from_api_payload_json( - # cls, - # json_payload: str, - # reverse_user_roles_mapping: Optional[Dict[str, str]] = None - # ) -> 'LocalUserModel': - - # payload = json.loads(json_payload) - # return cls.from_api_payload(payload, reverse_user_roles_mapping) From 9b6bebdd79ee4eb92044f3e9c11892471150c8f3 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 22 Jan 2026 01:04:05 -0500 Subject: [PATCH 028/109] [ignore] Pydantic Models: Modify and Clean both local_user.py and base.py based on comments. Add a get method and get_identifier_value function to NDBaseModel. 
--- plugins/module_utils/models/base.py | 43 ++++++------ plugins/module_utils/models/local_user.py | 82 ++++++++++------------- 2 files changed, 57 insertions(+), 68 deletions(-) diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index e7301d14..bdd1b9c2 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -11,6 +11,7 @@ from abc import ABC, abstractmethod from pydantic import BaseModel, ConfigDict from typing import List, Dict, Any, Optional, ClassVar +from typing_extensions import Self class NDBaseModel(BaseModel, ABC): @@ -22,7 +23,7 @@ class NDBaseModel(BaseModel, ABC): populate_by_name=True, ) - # TODO: find ways to redifine these var in every + # TODO: find ways to redifine these var in every future NDBaseModels identifiers: ClassVar[List[str]] = [] use_composite_identifiers: ClassVar[bool] = False @@ -32,26 +33,28 @@ def to_payload(self) -> Dict[str, Any]: @classmethod @abstractmethod - def from_response(cls, response: Dict[str, Any]) -> 'NDBaseModel': + def from_response(cls, response: Dict[str, Any]) -> Self: pass - # TODO: Modify to make it more generic and Pydantic + def get(self, field: str, default: Any = None) -> Any: + """Custom get method to mimic dictionary behavior.""" + return getattr(self, field, default) + + # TODO: Modify to make it more generic and Pydantic | might change and be moved in different Generic Class/Model # TODO: add a method to get nested keys, ex: get("spec", {}).get("onboardUrl") def get_identifier_value(self) -> Any: - """Generates the internal map key based on the selected mode.""" - # if self.use_composite_keys: - # # Mode: Composite (Tuple of ALL keys) - # values = [] - # for key in self.identifier_keys: - # val = config.get(key) - # if val is None: - # return None # Missing a required part - # values.append(val) - # return tuple(values) - # else: - # # Mode: Priority (First available key) - # for key in self.identifier_keys: - # if key in config: 
- # return config[key] - # return None - pass + """Generates the internal map key based on the selected mode.""" + if self.use_composite_identifiers: + # Mode: Composite (Tuple of ALL keys) + values = [] + for identifier in self.identifiers: + value = self.get(identifier) + if value is None: + return None # Missing a required part | Add Error Handling method here + values.append(value) + return tuple(values) + else: + # Mode: Priority (First available key) + for identifier in self.identifiers: + return self.get(identifier) + return None diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 7877a5a5..28cea27c 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -8,9 +8,10 @@ __metaclass__ = type -from pydantic import Field, field_validator +from pydantic import Field, field_validator, SecretStr from types import MappingProxyType from typing import List, Dict, Any, Optional, ClassVar +from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel @@ -20,7 +21,7 @@ # TODO: Surclass BaseModel -> Priority # TODO: Look at ansible aliases -# TODO: use constants.py file in the future +# TODO: To be moved in constants.py file user_roles_mapping = MappingProxyType({}) @@ -39,15 +40,11 @@ def to_payload(self) -> Dict[str, Any]: } @classmethod - def from_response(cls, name: str, domain_config: List[str]) -> 'NDBaseModel': - internal_roles = [user_roles_mapping.get(role, role) for role in domain_config.get("roles", [])] - - domain_data = { - "name": name, - "roles": internal_roles - } - - return cls(**domain_data) + def from_response(cls, name: str, domain_config: List[str]) -> Self: + return cls( + name=name, + roles=[user_roles_mapping.get(role, role) for role in domain_config.get("roles", [])] + ) class LocalUserModel(NDBaseModel): @@ -55,17 +52,17 @@ class LocalUserModel(NDBaseModel): # TODO: Define a way to 
generate it (look at NDBaseModel comments) identifiers: ClassVar[List[str]] = ["login_id"] - # TODO: Use Optinal to remove default values (get them from API response instead) - email: str = Field(default="", alias="email") + email: Optional[str] = Field(alias="email") login_id: str = Field(alias="loginID") - first_name: str = Field(default="", alias="firstName") - last_name: str = Field(default="", alias="lastName") - user_password: str = Field(alias="password") - reuse_limitation: int = Field(default=0, alias="reuseLimitation") - time_interval_limitation: int = Field(default=0, alias="timeIntervalLimitation") - security_domains: List[LocalUserSecurityDomainModel] = Field(default_factory=list, alias="domains") - remote_id_claim: str = Field(default="", alias="remoteIDClaim") - remote_user_authorization: bool = Field(default=False, alias="xLaunch") + first_name: Optional[str] = Field(default="", alias="firstName") + last_name: Optional[str] = Field(default="", alias="lastName") + # TODO: Check secrets manipulation when tracking changes while maintaining security + user_password: Optional[SecretStr] = Field(alias="password") + reuse_limitation: Optional[int] = Field(default=0, alias="reuseLimitation") + time_interval_limitation: Optional[int] = Field(default=0, alias="timeIntervalLimitation") + security_domains: Optional[List[LocalUserSecurityDomainModel]] = Field(alias="domains") + remote_id_claim: Optional[str] = Field(default="", alias="remoteIDClaim") + remote_user_authorization: Optional[bool] = Field(default=False, alias="xLaunch") def to_payload(self) -> Dict[str, Any]: """Convert the model to the specific API payload format required.""" @@ -86,31 +83,20 @@ def to_payload(self) -> Dict[str, Any]: return payload @classmethod - def from_response(cls, payload: Dict[str, Any]) -> 'LocalUserModel': + def from_response(cls, response: Dict[str, Any]) -> Self: - if reverse_user_roles_mapping is None: - reverse_user_roles_mapping = {} - - user_data = { - "email": 
payload.get("email"), - "loginID": payload.get("loginID"), - "firstName": payload.get("firstName"), - "lastName": payload.get("lastName"), - "password": payload.get("password"), - "remoteIDClaim": payload.get("remoteIDClaim"), - "xLaunch": payload.get("xLaunch"), - } - - password_policy = payload.get("passwordPolicy", {}) - user_data["reuseLimitation"] = password_policy.get("reuseLimitation", 0) - user_data["timeIntervalLimitation"] = password_policy.get("timeIntervalLimitation", 0) - - domains_data = [] - rbac = payload.get("rbac", {}) - if rbac and "domains" in rbac: - for domain_name, domain_config in rbac["domains"].items(): - domains_data.append(LocalUserSecurityDomainModel.from_api_response(domain_name, domain_config)) - - user_data["domains"] = domains_data - - return cls(**user_data) + return cls( + email=response.get("email"), + login_id=response.get("loginID"), + first_name=response.get("firstName"), + last_name=response.get("lastName"), + user_password=response.get("password"), + reuse_limitation=response.get("passwordPolicy", {}).get("reuseLimitation"), + time_interval_limitation=response.get("passwordPolicy", {}).get("timeIntervalLimitation"), + security_domains=[ + LocalUserSecurityDomainModel.from_response(name, domain_config) + for name, domain_config in response.get("rbac", {}).get("domains", {}).items() + ], + remote_id_claim=response.get("remoteIDClaim"), + remote_user_authorization=response.get("xLaunch"), + ) From 019c8e3247c54b15870a3febd1bed53f6429facb Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Fri, 23 Jan 2026 00:56:49 -0500 Subject: [PATCH 029/109] [ignore] Pydantic ND base models and local_user models: Final proposition of core design adding new methods which will be used in NDConfigCollection and NDNetworkResourceModule classes as well as basic error handling and simple docstrings. 
--- plugins/module_utils/models/base.py | 124 ++++++++++++++---- plugins/module_utils/models/local_user.py | 146 ++++++++++++++-------- 2 files changed, 192 insertions(+), 78 deletions(-) diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index bdd1b9c2..a7eabf17 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -10,51 +10,127 @@ from abc import ABC, abstractmethod from pydantic import BaseModel, ConfigDict -from typing import List, Dict, Any, Optional, ClassVar +from typing import List, Dict, Any, ClassVar, Tuple, Union, Literal from typing_extensions import Self class NDBaseModel(BaseModel, ABC): - + """ + Base model for all Nexus Dashboard API objects. + + Supports three identifier strategies: + - single: One unique required field (e.g., ["login_id"]) + - composite: Multiple required fields as tuple (e.g., ["device", "interface"]) + - hierarchical: Priority-ordered fields (e.g., ["uuid", "name"]) + """ + model_config = ConfigDict( str_strip_whitespace=True, use_enum_values=True, validate_assignment=True, populate_by_name=True, + extra='ignore' ) - - # TODO: find ways to redifine these var in every future NDBaseModels + + # Subclasses MUST define these identifiers: ClassVar[List[str]] = [] - use_composite_identifiers: ClassVar[bool] = False - + identifier_strategy: ClassVar[Literal["single", "composite", "hierarchical"]] = "single" + + # Optional: fields to exclude from diffs (e.g., passwords) + exclude_from_diff: ClassVar[List[str]] = [] + @abstractmethod def to_payload(self) -> Dict[str, Any]: + """ + Convert model to API payload format. 
+ """ pass @classmethod @abstractmethod def from_response(cls, response: Dict[str, Any]) -> Self: + """ + Create model instance from API response. + """ pass - def get(self, field: str, default: Any = None) -> Any: - """Custom get method to mimic dictionary behavior.""" - return getattr(self, field, default) - - # TODO: Modify to make it more generic and Pydantic | might change and be moved in different Generic Class/Model - # TODO: add a method to get nested keys, ex: get("spec", {}).get("onboardUrl") - def get_identifier_value(self) -> Any: - """Generates the internal map key based on the selected mode.""" - if self.use_composite_identifiers: - # Mode: Composite (Tuple of ALL keys) + def get_identifier_value(self) -> Union[str, int, Tuple[Any, ...]]: + """ + Extract identifier value(s) from this instance: + - single identifier: Returns field value. + - composite identifiers: Returns tuple of all field values. + - hierarchical identifiers: Returns tuple of (field_name, value) for first non-None field. + """ + if not self.identifiers: + raise ValueError(f"{self.__class__.__name__} has no identifiers defined") + + if self.identifier_strategy == "single": + value = getattr(self, self.identifiers[0], None) + if value is None: + raise ValueError( + f"Single identifier field '{self.identifiers[0]}' is None" + ) + return value + + elif self.identifier_strategy == "composite": values = [] - for identifier in self.identifiers: - value = self.get(identifier) + missing = [] + + for field in self.identifiers: + value = getattr(self, field, None) if value is None: - return None # Missing a required part | Add Error Handling method here + missing.append(field) values.append(value) + + # NOTE: might not be needed in the future with field_validator + if missing: + raise ValueError( + f"Composite identifier fields {missing} are None. 
" + f"All required: {self.identifiers}" + ) + return tuple(values) + + elif self.identifier_strategy == "hierarchical": + for field in self.identifiers: + value = getattr(self, field, None) + if value is not None: + return (field, value) + + raise ValueError( + f"No non-None value in hierarchical fields {self.identifiers}" + ) + else: - # Mode: Priority (First available key) - for identifier in self.identifiers: - return self.get(identifier) - return None + raise ValueError(f"Unknown identifier strategy: {self.identifier_strategy}") + + def to_diff_dict(self) -> Dict[str, Any]: + """ + Export for diff comparison (excludes sensitive fields). + """ + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude=set(self.exclude_from_diff) + ) + +# NOTE: Maybe make it a seperate BaseModel +class NDNestedModel(NDBaseModel): + """ + Base for nested models without identifiers. + """ + + identifiers: ClassVar[List[str]] = [] + + def to_payload(self) -> Dict[str, Any]: + """ + Convert model to API payload format. + """ + return self.model_dump(by_alias=True, exclude_none=True) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> Self: + """ + Create model instance from API response. 
+ """ + return cls.model_validate(response) diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 28cea27c..b7069126 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -8,95 +8,133 @@ __metaclass__ = type -from pydantic import Field, field_validator, SecretStr +from pydantic import Field, SecretStr from types import MappingProxyType -from typing import List, Dict, Any, Optional, ClassVar +from typing import List, Dict, Any, Optional, ClassVar, Literal from typing_extensions import Self -from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel, NDNestedModel +from models.base import NDBaseModel, NDNestedModel -# TODO: Add Field validation methods -# TODO: define our own Field class for string versioning, ansible aliases -# TODO: Add a method to get identifier(s) -> define a generic NDNetworkResourceModel -# TODO: Surclass BaseModel -> Priority -# TODO: Look at ansible aliases +# TODO: Move it to constants.py and import it +USER_ROLES_MAPPING = MappingProxyType({ + "fabric_admin": "fabric-admin", + "observer": "observer", + "super_admin": "super-admin", + "support_engineer": "support-engineer", + "approver": "approver", + "designer": "designer", +}) -# TODO: To be moved in constants.py file -user_roles_mapping = MappingProxyType({}) +class LocalUserSecurityDomainModel(NDNestedModel): + """Security domain configuration for local user (nested model).""" -class LocalUserSecurityDomainModel(NDBaseModel): - - name: str = Field(alias="name") - roles: list[str] = Field(default_factory=lambda: 
["observer"], alias="roles") - + # Fields + name: str + roles: Optional[List[str]] = None + def to_payload(self) -> Dict[str, Any]: - return { + + return { self.name: { "roles": [ - user_roles_mapping.get(role, role) for role in self.roles + USER_ROLES_MAPPING.get(role, role) + for role in (self.roles or []) ] } } - + @classmethod - def from_response(cls, name: str, domain_config: List[str]) -> Self: + def from_response(cls, name: str, domain_config: Dict[str, Any]) -> Self: + + # NOTE: Maybe create a function from it to be moved to utils.py and to be imported + reverse_mapping = {value: key for key, value in USER_ROLES_MAPPING.items()} + return cls( name=name, - roles=[user_roles_mapping.get(role, role) for role in domain_config.get("roles", [])] + roles=[ + reverse_mapping.get(role, role) + for role in domain_config.get("roles", []) + ] ) class LocalUserModel(NDBaseModel): + """ + Local user configuration. - # TODO: Define a way to generate it (look at NDBaseModel comments) + Identifier: login_id (single field) + """ + + # Identifier configuration identifiers: ClassVar[List[str]] = ["login_id"] - - email: Optional[str] = Field(alias="email") - login_id: str = Field(alias="loginID") - first_name: Optional[str] = Field(default="", alias="firstName") - last_name: Optional[str] = Field(default="", alias="lastName") - # TODO: Check secrets manipulation when tracking changes while maintaining security - user_password: Optional[SecretStr] = Field(alias="password") - reuse_limitation: Optional[int] = Field(default=0, alias="reuseLimitation") - time_interval_limitation: Optional[int] = Field(default=0, alias="timeIntervalLimitation") - security_domains: Optional[List[LocalUserSecurityDomainModel]] = Field(alias="domains") - remote_id_claim: Optional[str] = Field(default="", alias="remoteIDClaim") - remote_user_authorization: Optional[bool] = Field(default=False, alias="xLaunch") - + identifier_strategy: ClassVar[Literal["single", "composite", "hierarchical"]] = "single" + 
exclude_from_diff: ClassVar[List[str]] = ["user_password"] + + # Fields + login_id: str = Field(..., alias="loginID") + email: Optional[str] = None + first_name: Optional[str] = Field(default=None, alias="firstName") + last_name: Optional[str] = Field(default=None, alias="lastName") + user_password: Optional[SecretStr] = Field(default=None, alias="password") + reuse_limitation: Optional[int] = Field(default=None, alias="reuseLimitation") + time_interval_limitation: Optional[int] = Field(default=None, alias="timeIntervalLimitation") + security_domains: Optional[List[LocalUserSecurityDomainModel]] = Field(default=None, alias="domains") + remote_id_claim: Optional[str] = Field(default=None, alias="remoteIDClaim") + remote_user_authorization: Optional[bool] = Field(default=None, alias="xLaunch") + def to_payload(self) -> Dict[str, Any]: - """Convert the model to the specific API payload format required.""" + payload = self.model_dump( + by_alias=True, + exclude={ + 'domains', + 'security_domains', + 'reuseLimitation', + 'reuse_limitation', + 'timeIntervalLimitation', + 'time_interval_limitation' + }, + exclude_none=True + ) - payload = self.model_dump(by_alias=True, exclude={'domains', 'reuseLimitation', 'timeIntervalLimitation'}) + if self.user_password: + payload["password"] = self.user_password.get_secret_value() if self.security_domains: payload["rbac"] = {"domains": {}} for domain in self.security_domains: - payload["rbac"]["domains"].update(domain.to_api_payload()) - - if self.reuse_limitation or self.time_interval_limitation: - payload["passwordPolicy"] = { - "reuseLimitation": self.reuse_limitation, - "timeIntervalLimitation": self.time_interval_limitation, - } - + payload["rbac"]["domains"].update(domain.to_payload()) + + if self.reuse_limitation is not None or self.time_interval_limitation is not None: + payload["passwordPolicy"] = {} + if self.reuse_limitation is not None: + payload["passwordPolicy"]["reuseLimitation"] = self.reuse_limitation + if 
self.time_interval_limitation is not None: + payload["passwordPolicy"]["timeIntervalLimitation"] = self.time_interval_limitation + return payload - + @classmethod def from_response(cls, response: Dict[str, Any]) -> Self: + password_policy = response.get("passwordPolicy", {}) + rbac = response.get("rbac", {}) + domains = rbac.get("domains", {}) + + security_domains = [ + LocalUserSecurityDomainModel.from_response(name, config) + for name, config in domains.items() + ] if domains else None return cls( - email=response.get("email"), login_id=response.get("loginID"), + email=response.get("email"), first_name=response.get("firstName"), last_name=response.get("lastName"), user_password=response.get("password"), - reuse_limitation=response.get("passwordPolicy", {}).get("reuseLimitation"), - time_interval_limitation=response.get("passwordPolicy", {}).get("timeIntervalLimitation"), - security_domains=[ - LocalUserSecurityDomainModel.from_response(name, domain_config) - for name, domain_config in response.get("rbac", {}).get("domains", {}).items() - ], + reuse_limitation=password_policy.get("reuseLimitation"), + time_interval_limitation=password_policy.get("timeIntervalLimitation"), + security_domains=security_domains, remote_id_claim=response.get("remoteIDClaim"), - remote_user_authorization=response.get("xLaunch"), + remote_user_authorization=response.get("xLaunch") ) From 427f33f24f9c31b2209a60b06230f1cef5324cb4 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Fri, 23 Jan 2026 13:09:33 -0500 Subject: [PATCH 030/109] [ignore] Pydantic ND Config Collection: Final proposition of core design changing existing methods and adding new ones which will be used in NDNetworkResourceModule class as well as basic error handling and simple docstrings. 
--- plugins/module_utils/nd_config_collection.py | 515 ++++++++++--------- 1 file changed, 266 insertions(+), 249 deletions(-) diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 8f0058bb..2f256d30 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -8,289 +8,306 @@ __metaclass__ = type -import sys +from typing import TypeVar, Generic, Optional, List, Dict, Any, Union, Tuple, Literal, Callable from copy import deepcopy -from functools import reduce -# Python 2 and 3 compatibility (To be removed in the future) -if sys.version_info[0] >= 3: - from collections.abc import MutableMapping - iteritems = lambda d: d.items() -else: - from collections import MutableMapping - iteritems = lambda d: d.iteritems() +# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from models.base import NDBaseModel -# TODO: Adapt to Pydantic Models -# NOTE: Single-Index Hybrid Collection for ND Network Resource Module -class NDConfigCollection(MutableMapping): +# Type aliases +# NOTE: Maybe add more type aliases in the future if needed +ModelType = TypeVar('ModelType', bound=NDBaseModel) +IdentifierKey = Union[str, int, Tuple[Any, ...]] - def __init__(self, identifier_keys, data=None, use_composite_keys=False): - self.identifier_keys = identifier_keys - self.use_composite_keys = use_composite_keys - - # Dual Storage - self._list = [] - self._map = {} + +class NDConfigCollection(Generic[ModelType]): + """ + Nexus Dashboard configuration collection for NDBaseModel instances. 
+ """ + + def __init__(self, model_class: type[ModelType], items: Optional[List[ModelType]] = None): + """ + Initialize collection. + """ + self._model_class = model_class - if data: - for item in data: + # Dual storage + self._items: List[ModelType] = [] + self._index: Dict[IdentifierKey, int] = {} + + if items: + for item in items: self.add(item) - # TODO: add a method to get nested keys, ex: get("spec", {}).get("onboardUrl") - def _get_identifier_value(self, config): - """Generates the internal map key based on the selected mode.""" - if self.use_composite_keys: - # Mode: Composite (Tuple of ALL keys) - values = [] - for key in self.identifier_keys: - val = config.get(key) - if val is None: - return None # Missing a required part - values.append(val) - return tuple(values) - else: - # Mode: Priority (First available key) - for key in self.identifier_keys: - if key in config: - return config[key] - return None - - # Magic Methods - def __getitem__(self, key): - return self._map[key] - - def __setitem__(self, key, value): - if key in self._map: - old_ref = self._map[key] - try: - idx = self._list.index(old_ref) - self._list[idx] = value - self._map[key] = value - except ValueError: - pass - else: - # Add new - self._list.append(value) - self._map[key] = value - - def __delitem__(self, key): - if key in self._map: - obj_ref = self._map[key] - del self._map[key] - self._list.remove(obj_ref) + def _extract_key(self, item: ModelType) -> IdentifierKey: + """ + Extract identifier key from item. + """ + try: + return item.get_identifier_value() + except Exception as e: + raise ValueError(f"Failed to extract identifier: {e}") from e + + def _rebuild_index(self) -> None: + """Rebuild index from scratch (O(n) operation).""" + self._index.clear() + for index, item in enumerate(self._items): + key = self._extract_key(item) + self._index[key] = index + + # Core CRUD Operations + + def add(self, item: ModelType) -> IdentifierKey: + """ + Add item to collection (O(1) operation). 
+ """ + if not isinstance(item, self._model_class): + raise TypeError( + f"Item must be instance of {self._model_class.__name__}, " + f"got {type(item).__name__}" + ) + + key = self._extract_key(item) + + if key in self._index: + raise ValueError( + f"Item with identifier {key} already exists. Use replace() to update" + ) + + position = len(self._items) + self._items.append(item) + self._index[key] = position + + return key + + def get(self, key: IdentifierKey) -> Optional[ModelType]: + """ + Get item by identifier key (O(1) operation). + """ + index = self._index.get(key) + return self._items[index] if index is not None else None + + def replace(self, item: ModelType) -> bool: + """ + Replace existing item with same identifier (O(1) operation). + """ + if not isinstance(item, self._model_class): + raise TypeError( + f"Item must be instance of {self._model_class.__name__}, " + f"got {type(item).__name__}" + ) + + key = self._extract_key(item) + index = self._index.get(key) + + if index is None: + return False + + self._items[index] = item + return True + + def merge(self, item: ModelType, custom_merge_function: Optional[Callable[[ModelType, ModelType], ModelType]] = None) -> ModelType: + """ + Merge item with existing, or add if not present. 
+ """ + key = self._extract_key(item) + existing = self.get(key) + + if existing is None: + self.add(item) + return item + + # Custom or default merge + if custom_merge_function: + merged = custom_merge_function(existing, item) else: - raise KeyError(key) - - def __iter__(self): - return iter(self._map) - - def __len__(self): - return len(self._list) + # Default merge + existing_data = existing.model_dump() + new_data = item.model_dump(exclude_unset=True) + merged_data = self._deep_merge(existing_data, new_data) + merged = self._model_class.model_validate(merged_data) + + self.replace(merged) + return merged - def __eq__(self, other): - if isinstance(other, NDConfigCollection): - return self._list == other._list - elif isinstance(other, list): - return self._list == other - elif isinstance(other, dict): - return self._map == other - return False + def _deep_merge(self, base: Dict, update: Dict) -> Dict: + """Recursively merge dictionaries.""" + result = base.copy() + + for key, value in update.items(): + if value is None: + continue + + if key in result and isinstance(result[key], dict) and isinstance(value, dict): + result[key] = self._deep_merge(result[key], value) + else: + result[key] = value + + return result + + def delete(self, key: IdentifierKey) -> bool: + """ + Delete item by identifier (O(n) operation due to index rebuild) + """ + index = self._index.get(key) + + if index is None: + return False + + del self._items[index] + self._rebuild_index() + + return True + + # Diff Operations + + def get_diff_config(self, new_item: ModelType, unwanted_keys: Optional[List[Union[str, List[str]]]] = None) -> Literal["new", "no_diff", "changed"]: + """ + Compare single item against collection. 
+ """ + try: + key = self._extract_key(new_item) + except ValueError: + return "new" + + existing = self.get(key) + + if existing is None: + return "new" - def __ne__(self, other): - return not self.__eq__(other) + existing_data = existing.to_diff_dict() + new_data = new_item.to_diff_dict() + + if unwanted_keys: + existing_data = self._remove_unwanted_keys(existing_data, unwanted_keys) + new_data = self._remove_unwanted_keys(new_data, unwanted_keys) - def __repr__(self): - return str(self._list) + is_subset = self._issubset(new_data, existing_data) + + return "no_diff" if is_subset else "changed" + + def get_diff_collection(self, other: "NDConfigCollection[ModelType]", unwanted_keys: Optional[List[Union[str, List[str]]]] = None) -> bool: + """ + Check if two collections differ. + """ + if not isinstance(other, NDConfigCollection): + raise TypeError("Argument must be NDConfigCollection") + + if len(self) != len(other): + return True - # Helper Methods - def _filter_dict(self, data, ignore_keys): - return {k: v for k, v in iteritems(data) if k not in ignore_keys} + for item in other: + if self.get_diff_config(item, unwanted_keys) != "no_diff": + return True - def _issubset(self, subset, superset): + for key in self.keys(): + if other.get(key) is None: + return True + + return False + + def get_diff_identifiers(self, other: "NDConfigCollection[ModelType]") -> List[IdentifierKey]: + """ + Get identifiers in self but not in other. 
+ """ + current_keys = set(self.keys()) + other_keys = set(other.keys()) + return list(current_keys - other_keys) + + def _issubset(self, subset: Any, superset: Any) -> bool: + """Check if subset is contained in superset.""" if type(subset) is not type(superset): return False - + if not isinstance(subset, dict): if isinstance(subset, list): return all(item in superset for item in subset) return subset == superset - - for key, value in iteritems(subset): + + for key, value in subset.items(): if value is None: continue - + if key not in superset: return False - - superset_value = superset.get(key) - - if not self._issubset(value, superset_value): + + if not self._issubset(value, superset[key]): return False + return True - def _remove_unwanted_keys(self, data, unwanted_keys): + def _remove_unwanted_keys(self, data: Dict, unwanted_keys: List[Union[str, List[str]]]) -> Dict: + """Remove unwanted keys from dict (supports nested paths).""" + data = deepcopy(data) + for key in unwanted_keys: if isinstance(key, str): if key in data: del data[key] + elif isinstance(key, list) and len(key) > 0: - key_path, last = key[:-1], key[-1] try: - parent = reduce(lambda d, k: d.get(k) if isinstance(d, dict) else None, key_path, data) - if isinstance(parent, dict) and last in parent: - del parent[last] - except (KeyError, TypeError): + parent = data + for k in key[:-1]: + if isinstance(parent, dict) and k in parent: + parent = parent[k] + else: + break + else: + if isinstance(parent, dict) and key[-1] in parent: + del parent[key[-1]] + except (KeyError, TypeError, IndexError): pass + return data - - # Core Operations - def to_list(self): - return self._list + + # Collection Operations - def to_dict(self): - return self._map - - def copy(self): - return NDConfigCollection(self.identifier_keys, deepcopy(self._list), self.use_composite_keys) - - def add(self, config): - ident = self._get_identifier_value(config) - if ident is None: - mode = "Composite" if self.use_composite_keys else 
"Priority" - raise ValueError("[{0} Mode] Config missing required keys: {1}".format(mode, self.identifier_keys)) - - if ident in self._map: - self.__setitem__(ident, config) - else: - self._list.append(config) - self._map[ident] = config - - def merge(self, new_config): - ident = self._get_identifier_value(new_config) - if ident and ident in self._map: - self._map[ident].update(new_config) - else: - self.add(new_config) - - def replace(self, new_config): - ident = self._get_identifier_value(new_config) - if ident: - self[ident] = new_config - else: - self.add(new_config) - - def remove(self, identifiers): - # Try Map Removal - try: - target_key = self._get_identifier_value(identifiers) - if target_key and target_key in self._map: - self.__delitem__(target_key) - return - except Exception: - pass - - # Fallback: Linear Removal - to_remove = [] - for config in self._list: - match = True - for k, v in iteritems(identifiers): - if config.get(k) != v: - match = False - break - if match: - to_remove.append(self._get_identifier_value(config)) - - for ident in to_remove: - if ident in self._map: - self.__delitem__(ident) - - def get_by_key(self, key, default=None): - return self._map.get(key, default) - - def get_by_idenfiers(self, identifiers, default=None): - # Try Map Lookup - target_key = self._get_identifier_value(identifiers) - if target_key and target_key in self._map: - return self._map[target_key] - - # Fallback: Linear Lookup - valid_search_keys = [k for k in identifiers if k in self.identifier_keys] - if not valid_search_keys: - return default - - for config in self._list: - match = True - for k in valid_search_keys: - if config.get(k) != identifiers[k]: - match = False - break - if match: - return config - return default - - # Diff logic - def get_diff_config(self, new_config, unwanted_keys=None): - unwanted_keys = unwanted_keys or [] - - ident = self._get_identifier_value(new_config) - - if not ident or ident not in self._map: - return "new" - - existing = 
deepcopy(self._map[ident]) - sent = deepcopy(new_config) - - self._remove_unwanted_keys(existing, unwanted_keys) - self._remove_unwanted_keys(sent, unwanted_keys) - - is_subset = self._issubset(sent, existing) - - if is_subset: - return "no_diff" - else: - return "changed" - - def get_diff_collection(self, new_collection, unwanted_keys=None): - if not isinstance(new_collection, NDConfigCollection): - raise TypeError("Argument must be an NDConfigCollection") - - if len(self) != len(new_collection): - return True - - for item in new_collection.to_list(): - if self.get_diff_config(item, unwanted_keys) != "no_diff": - return True - - for ident in self._map: - if ident not in new_collection._map: - return True - - return False - - def get_diff_identifiers(self, new_collection): - current_identifiers = set(self.config_collection.keys()) - other_identifiers = set(new_collection.config_collection.keys()) - - return list(current_identifiers - other_identifiers) + def __len__(self) -> int: + """Return number of items.""" + return len(self._items) + + def __iter__(self): + """Iterate over items.""" + return iter(self._items) - # Sanitize Operations - def sanitize(self, keys_to_remove=None, values_to_remove=None, remove_none_values=False): - keys_to_remove = keys_to_remove or [] - values_to_remove = values_to_remove or [] + def keys(self) -> List[IdentifierKey]: + """Get all identifier keys.""" + return list(self._index.keys()) - def recursive_clean(obj): - if isinstance(obj, dict): - keys = list(obj.keys()) - for k in keys: - v = obj[k] - if k in keys_to_remove or v in values_to_remove or (remove_none_values and v is None): - del obj[k] - continue - if isinstance(v, (dict, list)): - recursive_clean(v) - elif isinstance(obj, list): - for item in obj: - recursive_clean(item) + def copy(self) -> "NDConfigCollection[ModelType]": + """Create deep copy of collection.""" + return NDConfigCollection( + model_class=self._model_class, + items=deepcopy(self._items) + ) - for item in 
self._list: - recursive_clean(item) + # Serialization + + def to_list(self, **kwargs) -> List[Dict]: + """ + Export as list of dicts (with aliases). + """ + return [item.model_dump(by_alias=True, exclude_none=True, **kwargs) for item in self._items] + + def to_payload_list(self) -> List[Dict[str, Any]]: + """ + Export as list of API payloads. + """ + return [item.to_payload() for item in self._items] + + @classmethod + def from_list(cls, data: List[Dict], model_class: type[ModelType]) -> "NDConfigCollection[ModelType]": + """ + Create collection from list of dicts. + """ + items = [model_class.model_validate(item_data) for item_data in data] + return cls(model_class=model_class, items=items) + + @classmethod + def from_api_response(cls, response_data: List[Dict[str, Any]], model_class: type[ModelType]) -> "NDConfigCollection[ModelType]": + """ + Create collection from API response. + """ + items = [model_class.from_response(item_data) for item_data in response_data] + return cls(model_class=model_class, items=items) From 6fe3bbff551efe1c895533f5e69bd4a4e8868ff3 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Fri, 23 Jan 2026 13:51:54 -0500 Subject: [PATCH 031/109] [ignore] Pydantic Base ND Network Resource Module: Final proposition of core design changing existing methods and adding new ones which will be used in future as a based for ND network resource modules as well as basic error handling and simple docstrings. 
--- plugins/module_utils/nd_network_resources.py | 561 ++++++++++++++----- 1 file changed, 411 insertions(+), 150 deletions(-) diff --git a/plugins/module_utils/nd_network_resources.py b/plugins/module_utils/nd_network_resources.py index 3b549da1..ab7df9e2 100644 --- a/plugins/module_utils/nd_network_resources.py +++ b/plugins/module_utils/nd_network_resources.py @@ -9,196 +9,457 @@ __metaclass__ = type from copy import deepcopy -from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule -from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection -from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED +from typing import Optional, List, Dict, Any, Callable, Literal +from pydantic import ValidationError -# TODO: Make further enhancement to logs and outputs -# TODO: Adapt to Pydantic Models -# NOTE: ONLY works for new API endpoints introduced in ND v4.1.0 and later -class NDNetworkResourceModule(NDModule): +# TODO: To be replaced with: +# from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule +# from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection +# from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +# from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED +from nd import NDModule +from nd_config_collection import NDConfigCollection +from models.base import NDBaseModel +from constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED - def __init__(self, module, path, identifier_keys, use_composite_keys=False, actions_overwrite_map=None): - super().__init__(module) - # Initial variables +class NDNetworkResourceModule(NDModule): + """ + Generic Network Resource Module for Nexus Dashboard. 
+ """ + + def __init__(self, module, path: str, model_class: type[NDBaseModel], actions_overwrite_map: Optional[Dict[str, Callable]] = None): + """ + Initialize the Network Resource Module. + """ + super().__init__(module) + + # Configuration self.path = path + self.model_class = model_class self.actions_overwrite_map = actions_overwrite_map or {} - self.identifier_keys = identifier_keys - self.use_composite_keys = use_composite_keys - - # Initial data - self.init_all_data = self._query_all() - - # Info ouput - self.existing = NDConfigCollection(identifier_keys, data=self.init_all_data) - self.previous = NDConfigCollection(identifier_keys) - self.proposed = NDConfigCollection(identifier_keys) - self.sent = NDConfigCollection(identifier_keys) - - # Debug output - self.nd_logs = [] - - # Helper variables - self.current_identifier = "" - self.existing_config = {} - self.proposed_config = {} - - # Actions Operations - def actions_overwrite(action): + + # Initialize collections + try: + init_all_data = self._query_all() + + self.existing = NDConfigCollection.from_api_response( + response_data=init_all_data, + model_class=model_class + ) + self.previous = NDConfigCollection(model_class=model_class) + self.proposed = NDConfigCollection(model_class=model_class) + self.sent = NDConfigCollection(model_class=model_class) + + except Exception as e: + self.fail_json( + msg=f"Initialization failed: {str(e)}", + error=str(e) + ) + + # Operation tracking + self.nd_logs: List[Dict[str, Any]] = [] + + # Current operation context + self.current_identifier = None + self.existing_config: Dict[str, Any] = {} + self.proposed_config: Dict[str, Any] = {} + + # Action Decorator + + @staticmethod + def actions_overwrite(action: str): + """ + Decorator to allow overriding default action operations. 
+ """ def decorator(func): def wrapper(self, *args, **kwargs): overwrite_action = self.actions_overwrite_map.get(action) if callable(overwrite_action): - return overwrite_action(self) + return overwrite_action(self, *args, **kwargs) else: return func(self, *args, **kwargs) return wrapper return decorator - + + # Action Operations + @actions_overwrite("create") - def _create(self): - if not self.module.check_mode: + def _create(self) -> Optional[Dict[str, Any]]: + """ + Create a new configuration object. + """ + if self.module.check_mode: + return self.proposed_config + + try: return self.request(path=self.path, method="POST", data=self.proposed_config) - + except Exception as e: + raise Exception(f"Create failed for {self.current_identifier}: {e}") from e + @actions_overwrite("update") - def _update(self): - if not self.module.check_mode: - object_path = "{0}/{1}".format(self.path, self.current_identifier) + def _update(self) -> Optional[Dict[str, Any]]: + """ + Update an existing configuration object. 
+ """ + if self.module.check_mode: + return self.proposed_config + + try: + object_path = f"{self.path}/{self.current_identifier}" return self.request(path=object_path, method="PUT", data=self.proposed_config) - + except Exception as e: + raise Exception(f"Update failed for {self.current_identifier}: {e}") from e + @actions_overwrite("delete") - def _delete(self): - if not self.module.check_mode: - object_path = "{0}/{1}".format(self.path, self.current_identifier) + def _delete(self) -> None: + """Delete a configuration object.""" + if self.module.check_mode: + return + + try: + object_path = f"{self.path}/{self.current_identifier}" self.request(path=object_path, method="DELETE") + except Exception as e: + raise Exception(f"Delete failed for {self.current_identifier}: {e}") from e @actions_overwrite("query_all") - def _query_all(self): - return self.query_obj(self.path) - - def format_log(self, identifier, status, after_data, sent_payload_data=None): - item_result = { + def _query_all(self) -> List[Dict[str, Any]]: + """ + Query all configuration objects from device. + """ + try: + result = self.query_obj(self.path) + return result or [] + except Exception as e: + raise Exception(f"Query all failed: {e}") from e + + # Logging + + def format_log(self, identifier, status: Literal["created", "updated", "deleted", "no_change"], after_data: Optional[Dict[str, Any]] = None, sent_payload_data: Optional[Dict[str, Any]] = None) -> None: + """ + Create and append a log entry. 
+ """ + log_entry = { "identifier": identifier, "status": status, - "before": self.existing_config, + "before": deepcopy(self.existing_config), "after": deepcopy(after_data) if after_data is not None else self.existing_config, - "sent_payload": deepcopy(sent_payload_data) if sent_payload_data is not None else {}, + "sent_payload": deepcopy(sent_payload_data) if sent_payload_data is not None else {} } - + + # Add HTTP details if not in check mode if not self.module.check_mode and self.url is not None: - item_result.update( - { - "method": self.method, - "response": self.response, - "status": self.status, - "url": self.url, - } + log_entry.update({ + "method": self.method, + "response": self.response, + "status": self.status, + "url": self.url + }) + + self.nd_logs.append(log_entry) + + # State Management + + def manage_state( + self, state: Literal["merged", "replaced", "overridden", "deleted"], new_configs: List[Dict[str, Any]], unwanted_keys: Optional[List] = None, override_exceptions: Optional[List] = None) -> None: + """ + Manage state according to desired configuration. 
+ """ + unwanted_keys = unwanted_keys or [] + override_exceptions = override_exceptions or [] + + # Parse and validate configs + try: + parsed_items = [] + for config in new_configs: + try: + # Parse config into model + item = self.model_class.model_validate(config) + parsed_items.append(item) + except ValidationError as e: + self.fail_json( + msg=f"Invalid configuration: {e}", + config=config, + validation_errors=e.errors() + ) + return + + # Create proposed collection + self.proposed = NDConfigCollection( + model_class=self.model_class, + items=parsed_items ) + + # Save previous state + self.previous = self.existing.copy() - self.nd_logs.append(item_result) - - # Logs and Outputs formating Operations - # TODO: Move it to different file - def add_logs_and_ouputs(self): - if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - if self.params.get("output_level") in ("debug", "info"): + except Exception as e: + self.fail_json( + msg=f"Failed to prepare configurations: {e}", + error=str(e) + ) + return + + # Execute state operations + if state in ["merged", "replaced", "overridden"]: + self._manage_create_update_state(state, unwanted_keys) + + if state == "overridden": + self._manage_override_deletions(override_exceptions) + + elif state == "deleted": + self._manage_delete_state() + + else: + self.fail_json(msg=f"Invalid state: {state}") + + def _manage_create_update_state(self,state: Literal["merged", "replaced", "overridden"], unwanted_keys: List) -> None: + """ + Handle merged/replaced/overridden states. 
+ """ + for proposed_item in self.proposed: + try: + # Extract identifier + identifier = proposed_item.get_identifier_value() + self.current_identifier = identifier + + existing_item = self.existing.get(identifier) + self.existing_config = ( + existing_item.model_dump(by_alias=True, exclude_none=True) + if existing_item + else {} + ) + + # Determine diff status + diff_status = self.existing.get_diff_config( + proposed_item, + unwanted_keys=unwanted_keys + ) + + # No changes needed + if diff_status == "no_diff": + self.format_log( + identifier=identifier, + status="no_change", + after_data=self.existing_config + ) + continue + + # Prepare final config based on state + if state == "merged" and existing_item: + # Merge with existing + merged_item = self.existing.merge(proposed_item) + final_item = merged_item + else: + # Replace or create + if existing_item: + self.existing.replace(proposed_item) + else: + self.existing.add(proposed_item) + final_item = proposed_item + + # Convert to API payload + self.proposed_config = final_item.to_payload() + + # Execute API operation + if diff_status == "changed": + response = self._update() + operation_status = "updated" + else: + response = self._create() + operation_status = "created" + + # Track sent payload + if not self.module.check_mode: + self.sent.add(final_item) + sent_payload = self.proposed_config + else: + sent_payload = None + + # Log operation + self.format_log( + identifier=identifier, + status=operation_status, + after_data=( + response if not self.module.check_mode + else final_item.model_dump(by_alias=True, exclude_none=True) + ), + sent_payload_data=sent_payload + ) + + except Exception as e: + error_msg = f"Failed to process {identifier}: {e}" + + self.format_log( + identifier=identifier, + status="no_change", + after_data=self.existing_config + ) + + if not self.module.params.get("ignore_errors", False): + self.fail_json( + msg=error_msg, + identifier=str(identifier), + error=str(e) + ) + return + + def 
_manage_override_deletions(self, override_exceptions: List) -> None: + """ + Delete items not in proposed config (for overridden state). + """ + diff_identifiers = self.previous.get_diff_identifiers(self.proposed) + + for identifier in diff_identifiers: + if identifier in override_exceptions: + continue + + try: + self.current_identifier = identifier + + existing_item = self.existing.get(identifier) + if not existing_item: + continue + + self.existing_config = existing_item.model_dump( + by_alias=True, + exclude_none=True + ) + + # Execute delete + self._delete() + + # Remove from collection + self.existing.delete(identifier) + + # Log deletion + self.format_log( + identifier=identifier, + status="deleted", + after_data={} + ) + + except Exception as e: + error_msg = f"Failed to delete {identifier}: {e}" + + if not self.module.params.get("ignore_errors", False): + self.fail_json( + msg=error_msg, + identifier=str(identifier), + error=str(e) + ) + return + + def _manage_delete_state(self) -> None: + """Handle deleted state.""" + for proposed_item in self.proposed: + try: + identifier = proposed_item.get_identifier_value() + self.current_identifier = identifier + + existing_item = self.existing.get(identifier) + if not existing_item: + # Already deleted or doesn't exist + self.format_log( + identifier=identifier, + status="no_change", + after_data={} + ) + continue + + self.existing_config = existing_item.model_dump( + by_alias=True, + exclude_none=True + ) + + # Execute delete + self._delete() + + # Remove from collection + self.existing.delete(identifier) + + # Log deletion + self.format_log( + identifier=identifier, + status="deleted", + after_data={} + ) + + except Exception as e: + error_msg = f"Failed to delete {identifier}: {e}" + + if not self.module.params.get("ignore_errors", False): + self.fail_json( + msg=error_msg, + identifier=str(identifier), + error=str(e) + ) + return + + # Output Formatting + + def add_logs_and_outputs(self) -> None: + """Add logs 
and outputs to module result based on output_level.""" + output_level = self.params.get("output_level", "normal") + state = self.params.get("state") + + # Add previous state for certain states and output levels + if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: + if output_level in ("debug", "info"): self.result["previous"] = self.previous.to_list() + + # Check if there were changes if not self.has_modified and self.previous.get_diff_collection(self.existing): self.result["changed"] = True + + # Add stdout if present if self.stdout: self.result["stdout"] = self.stdout - - if self.params.get("output_level") == "debug": + + # Add debug information + if output_level == "debug": self.result["nd_logs"] = self.nd_logs + if self.url is not None: self.result["httpapi_logs"] = self.httpapi_logs - - if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - self.result["sent"] = self.sent.to_list() - self.result["proposed"] = self.proposed.to_list() - - self.result["current"] = self.existing.to_list() - - # Manage State Operations - def manage_state(self, state, new_configs, unwanted_keys=None, override_exceptions=None): - unwanted_keys = unwanted_keys or [] - override_exceptions = override_exceptions or [] - - self.proposed = NDConfigCollection(self.identifier_keys, data=new_configs) - self.proposed.sanitize() - self.previous = self.existing.copy() - - if state in ["merged", "replaced", "overidden"]: - for identifier, config in self.proposed.items(): - - diff_config_info = self.existing.get_diff_config(config, unwanted_keys) - self.current_identifier = identifier - self.existing_config = deepcopy(self.existing.get_by_key(identifier, {})) - self.proposed_config = config - request_response = None - sent_payload = None - status = "no_change" - - if diff_config_info != "no_diff": - if state == "merged": - self.existing.merge(config) - self.proposed_config = self.existing[identifier] - else: - self.existing.replace(config) - - if diff_config_info == 
"changed": - request_response = self._update() - status = "updated" - else: - request_response = self._create() - status= "created" - - if not self.module.check_mode: - self.sent.add(self.proposed_config) - sent_payload = self.proposed_config - else: - request_response = self.proposed_config - - self.format_log(identifier, status, request_response, sent_payload) - - if state == "overidden": - diff_identifiers = self.previous.get_diff_identifiers(self.proposed) - for identifier in diff_identifiers: - if identifier not in override_exceptions: - self.current_identifier = identifier - self.existing_config = deepcopy(self.existing.get_by_key(identifier, {})) - self._delete() - del self.existing[identifier] - self.format_log(identifier, "deleted", after_data={}) + if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: + self.result["sent"] = self.sent.to_payload_list() + self.result["proposed"] = self.proposed.to_list() - - elif state == "deleted": - for identifier, config in self.proposed.items(): - if identifier in self.existing.keys(): - self.current_identifier = identifier - self.existing_config = deepcopy(self.existing.get_by_key(identifier, {})) - self.proposed_config = config - self._delete() - del self.existing[identifier] - self.format_log(identifier, "deleted", after_data={}) - - # Outputs Operations - def fail_json(self, msg, **kwargs): - self.add_logs_and_ouputs() - + # Always include current state + self.result["current"] = self.existing.to_list() + + # Module Exit Methods + + def fail_json(self, msg: str, **kwargs) -> None: + """ + Exit module with failure. + """ + self.add_logs_and_outputs() self.result.update(**kwargs) self.module.fail_json(msg=msg, **self.result) - - def exit_json(self, **kwargs): - self.add_logs_and_ouputs() - + + def exit_json(self, **kwargs) -> None: + """ + Exit module successfully. 
+ """ + self.add_logs_and_outputs() + + # Add diff if module supports it if self.module._diff and self.result.get("changed") is True: - self.result["diff"] = dict( - before=self.previous.to_list(), - after=self.existing.to_list(), - ) - + try: + # Use diff-safe dicts (excludes sensitive fields) + before = [item.to_diff_dict() for item in self.previous] + after = [item.to_diff_dict() for item in self.existing] + + self.result["diff"] = dict( + before=before, + after=after + ) + except Exception: + pass # Don't fail on diff generation + self.result.update(**kwargs) self.module.exit_json(**self.result) From 0b36b2d1fc317f943aaecefddc91e594141dc98b Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Fri, 23 Jan 2026 14:37:44 -0500 Subject: [PATCH 032/109] [ignore] Modify nd_local_user based on Pydantic implementation and changes added to NDNetworkResourceModule. --- plugins/modules/nd_local_user.py | 91 +++++++++++++++----------------- 1 file changed, 43 insertions(+), 48 deletions(-) diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 4a5f1ad2..3dcaf1a4 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -175,23 +175,34 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec, NDModule -from ansible_collections.cisco.nd.plugins.module_utils.nd_network_resources import NDNetworkResourceModule -from ansible_collections.cisco.nd.plugins.module_utils.constants import USER_ROLES_MAPPING +# TODO: To be replaced with: +# from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec +# from ansible_collections.cisco.nd.plugins.module_utils.nd_network_resource_module import NDNetworkResourceModule +# from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel +# from ansible_collections.cisco.nd.plugins.module_utils.constants import USER_ROLES_MAPPING +from 
module_utils.nd import nd_argument_spec +from module_utils.nd_network_resources import NDNetworkResourceModule +from module_utils.models.local_user import LocalUserModel +from module_utils.constants import USER_ROLES_MAPPING -# Actions overwrite functions -def query_all_local_users(nd): - return nd.query_obj(nd.path).get("localusers") +# NOTE: Maybe Add the overwrite action in the LocalUserModel +def query_all_local_users(nd_module): + """ + Custom query_all action to extract 'localusers' from response. + """ + response = nd_module.query_obj(nd_module.path) + return response.get("localusers", []) -# TODO: Adapt to Pydantic Model +# NOTE: Maybe Add More aliases like in the LocalUserModel / Revisit the argmument_spec def main(): argument_spec = nd_argument_spec() argument_spec.update( config=dict( type="list", elements="dict", + required=True, options=dict( email=dict(type="str"), login_id=dict(type="str", required=True), @@ -221,49 +232,33 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) - - path = "/api/v1/infra/aaa/localUsers" - identifier_keys = ["loginID"] - actions_overwrite_map = {"query_all": query_all_local_users} - - nd = NDNetworkResourceModule(module, path, identifier_keys, actions_overwrite_map=actions_overwrite_map) - - state = nd.params.get("state") - config = nd.params.get("config") - override_exceptions = nd.params.get("override_exceptions") - new_config = [] - for object in config: - payload = { - "email": object.get("email"), - "firstName": object.get("first_name"), - "lastName": object.get("last_name"), - "loginID": object.get("login_id"), - "password": object.get("user_password"), - "remoteIDClaim": object.get("remote_id_claim"), - "xLaunch": object.get("remote_user_authorization"), - } - - if object.get("security_domains"): - payload["rbac"] = { - "domains": { - security_domain.get("name"): { - "roles": ( - [USER_ROLES_MAPPING.get(role) for role in security_domain["roles"]] if isinstance(security_domain.get("roles"), list) 
else [] - ) - } - for security_domain in object["security_domains"] - }, - } - if object.get("reuse_limitation") or object.get("time_interval_limitation"): - payload["passwordPolicy"] = { - "reuseLimitation": object.get("reuse_limitation"), - "timeIntervalLimitation": object.get("time_interval_limitation"), + + try: + # Create NDNetworkResourceModule with LocalUserModel + nd_module = NDNetworkResourceModule( + module=module, + path="/api/v1/infra/aaa/localUsers", + model_class=LocalUserModel, + actions_overwrite_map={ + "query_all": query_all_local_users } - new_config.append(payload) - - nd.manage_state(state=state, new_configs=new_config, unwanted_keys=[["passwordPolicy", "passwordChangeTime"], ["userID"]], override_exceptions=override_exceptions) + ) + + # Manage state + nd_module.manage_state( + state=module.params["state"], + new_configs=module.params["config"], + unwanted_keys=[ + ["passwordPolicy", "passwordChangeTime"], # Nested path + ["userID"] # Simple key + ], + override_exceptions=module.params.get("override_exceptions") + ) - nd.exit_json() + nd_module.exit_json() + + except Exception as e: + module.fail_json(msg=f"Module execution failed: {str(e)}") if __name__ == "__main__": From e37636f1a576aed2fd8d39a9db41ab02cb033bfd Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 17 Feb 2026 13:46:10 -0500 Subject: [PATCH 033/109] [ignore] Add api_endpoints for configuring endpoints and orchestrators for orchestrating crud api operations with model instances and endpoints. 
--- plugins/module_utils/api_endpoints/base.py | 178 ++++++++++++++++++ plugins/module_utils/api_endpoints/enums.py | 46 +++++ .../module_utils/api_endpoints/local_user.py | 178 ++++++++++++++++++ plugins/module_utils/api_endpoints/mixins.py | 25 +++ plugins/module_utils/orchestrators/base.py | 79 ++++++++ .../module_utils/orchestrators/local_user.py | 42 +++++ 6 files changed, 548 insertions(+) create mode 100644 plugins/module_utils/api_endpoints/base.py create mode 100644 plugins/module_utils/api_endpoints/enums.py create mode 100644 plugins/module_utils/api_endpoints/local_user.py create mode 100644 plugins/module_utils/api_endpoints/mixins.py create mode 100644 plugins/module_utils/orchestrators/base.py create mode 100644 plugins/module_utils/orchestrators/local_user.py diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py new file mode 100644 index 00000000..1a9cd768 --- /dev/null +++ b/plugins/module_utils/api_endpoints/base.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@allenrobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from abc import ABC, abstractmethod +from pydantic import BaseModel, ConfigDict +from typing import Final, Union, Tuple, Any + +IdentifierKey = Union[str, int, Tuple[Any, ...], None] + +class NDBaseSmartEndpoint(BaseModel, ABC): + + # TODO: maybe to be modified in the future + model_config = ConfigDict(validate_assignment=True) + + base_path: str + + @abstractmethod + @property + def path(self) -> str: + pass + + @abstractmethod + @property + def verb(self) -> str: + pass + + # TODO: Maybe to be modifed to be more Pydantic + # TODO: Maybe change function's name + # NOTE: function to set mixins fields from identifiers + @abstractmethod + def 
set_identifiers(self, identifier: IdentifierKey = None): + pass + + +class NDBasePath: + """ + # Summary + + Centralized API Base Paths + + ## Description + + Provides centralized base path definitions for all ND API endpoints. + This allows API path changes to be managed in a single location. + + ## Usage + + ```python + # Get a complete base path + path = BasePath.control_fabrics("MyFabric", "config-deploy") + # Returns: /appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/MyFabric/config-deploy + + # Build custom paths + path = BasePath.v1("custom", "endpoint") + # Returns: /appcenter/cisco/ndfc/api/v1/custom/endpoint + ``` + + ## Design Notes + + - All base paths are defined as class constants for easy modification + - Helper methods compose paths from base constants + - Use these methods in Pydantic endpoint models to ensure consistency + - If NDFC changes base API paths, only this class needs updating + """ + + # Root API paths + NDFC_API: Final = "/appcenter/cisco/ndfc/api" + ND_INFRA_API: Final = "/api/v1/infra" + ONEMANAGE: Final = "/onemanage" + LOGIN: Final = "/login" + + @classmethod + def api(cls, *segments: str) -> str: + """ + # Summary + + Build path from NDFC API root. + + ## Parameters + + - segments: Path segments to append + + ## Returns + + - Complete path string + + ## Example + + ```python + path = BasePath.api("custom", "endpoint") + # Returns: /appcenter/cisco/ndfc/api/custom/endpoint + ``` + """ + if not segments: + return cls.NDFC_API + return f"{cls.NDFC_API}/{'/'.join(segments)}" + + @classmethod + def v1(cls, *segments: str) -> str: + """ + # Summary + + Build v1 API path. 
+ + ## Parameters + + - segments: Path segments to append after v1 + + ## Returns + + - Complete v1 API path + + ## Example + + ```python + path = BasePath.v1("lan-fabric", "rest") + # Returns: /appcenter/cisco/ndfc/api/v1/lan-fabric/rest + ``` + """ + return cls.api("v1", *segments) + + @classmethod + def nd_infra(cls, *segments: str) -> str: + """ + # Summary + + Build ND infra API path. + + ## Parameters + + - segments: Path segments to append after /api/v1/infra + + ## Returns + + - Complete ND infra API path + + ## Example + + ```python + path = BasePath.nd_infra("aaa", "localUsers") + # Returns: /api/v1/infra/aaa/localUsers + ``` + """ + if not segments: + return cls.ND_INFRA_API + return f"{cls.ND_INFRA_API}/{'/'.join(segments)}" + + @classmethod + def nd_infra_aaa(cls, *segments: str) -> str: + """ + # Summary + + Build ND infra AAA API path. + + ## Parameters + + - segments: Path segments to append after aaa (e.g., "localUsers") + + ## Returns + + - Complete ND infra AAA path + + ## Example + + ```python + path = BasePath.nd_infra_aaa("localUsers") + # Returns: /api/v1/infra/aaa/localUsers + ``` + """ + return cls.nd_infra("aaa", *segments) diff --git a/plugins/module_utils/api_endpoints/enums.py b/plugins/module_utils/api_endpoints/enums.py new file mode 100644 index 00000000..afb4dd5c --- /dev/null +++ b/plugins/module_utils/api_endpoints/enums.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@allenrobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +Enums used in api_endpoints. +""" +from enum import Enum + + +class VerbEnum(str, Enum): + """ + # Summary + + Enum for HTTP verb values used in endpoints. + + ## Members + + - GET: Represents the HTTP GET method. + - POST: Represents the HTTP POST method. + - PUT: Represents the HTTP PUT method. + - DELETE: Represents the HTTP DELETE method. 
+ """ + + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + + +class BooleanStringEnum(str, Enum): + """ + # Summary + + Enum for boolean string values used in query parameters. + + ## Members + + - TRUE: Represents the string "true". + - FALSE: Represents the string "false". + """ + + TRUE = "true" + FALSE = "false" \ No newline at end of file diff --git a/plugins/module_utils/api_endpoints/local_user.py b/plugins/module_utils/api_endpoints/local_user.py new file mode 100644 index 00000000..de493e40 --- /dev/null +++ b/plugins/module_utils/api_endpoints/local_user.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@allenrobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Infra AAA LocalUsers endpoint models. + +This module contains endpoint definitions for LocalUsers-related operations +in the ND Infra AAA API. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +from typing import Literal, Union, Tuple, Any, Final +from mixins import LoginIdMixin +from enums import VerbEnum +from base import NDBaseSmartEndpoint, NDBasePath +from pydantic import Field + +IdentifierKey = Union[str, int, Tuple[Any, ...], None] + +class _EpApiV1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseSmartEndpoint): + """ + Base class for ND Infra AAA Local Users endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/infra/aaa/localUsers endpoint. + """ + + base_path: Final = NDBasePath.nd_infra_aaa("localUsers") + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path. 
+ + ## Returns + + - Complete endpoint path string, optionally including login_id + """ + if self.login_id is not None: + return NDBasePath.nd_infra_aaa("localUsers", self.login_id) + return self.base_path + + def set_identifiers(self, identifier: IdentifierKey = None): + self.login_id = identifier + + +class EpApiV1InfraAaaLocalUsersGet(_EpApiV1InfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users GET Endpoint + + ## Description + + Endpoint to retrieve local users from the ND Infra AAA service. + Optionally retrieve a specific local user by login_id. + + ## Path + + - /api/v1/infra/aaa/localUsers + - /api/v1/infra/aaa/localUsers/{login_id} + + ## Verb + + - GET + """ + + class_name: Literal["EpApiV1InfraAaaLocalUsersGet"] = Field( + default="EpApiV1InfraAaaLocalUsersGet", + description="Class name for backward compatibility", + frozen=True, + ) + + @property + def verb(self) -> VerbEnum: + """Return the HTTP verb for this endpoint.""" + return VerbEnum.GET + + +class EpApiV1InfraAaaLocalUsersPost(_EpApiV1InfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users POST Endpoint + + ## Description + + Endpoint to create a local user in the ND Infra AAA service. + + ## Path + + - /api/v1/infra/aaa/localUsers + + ## Verb + + - POST + """ + + class_name: Literal["EpApiV1InfraAaaLocalUsersPost"] = Field( + default="EpApiV1InfraAaaLocalUsersPost", + description="Class name for backward compatibility", + frozen=True, + ) + + @property + def verb(self) -> VerbEnum: + """Return the HTTP verb for this endpoint.""" + return VerbEnum.POST + + +class EpApiV1InfraAaaLocalUsersPut(_EpApiV1InfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users PUT Endpoint + + ## Description + + Endpoint to update a local user in the ND Infra AAA service. 
+ + ## Path + + - /api/v1/infra/aaa/localUsers/{login_id} + + ## Verb + + - PUT + """ + + class_name: Literal["EpApiV1InfraAaaLocalUsersPut"] = Field( + default="EpApiV1InfraAaaLocalUsersPut", + description="Class name for backward compatibility", + frozen=True, + ) + + @property + def verb(self) -> VerbEnum: + """Return the HTTP verb for this endpoint.""" + return VerbEnum.PUT + + +class EpApiV1InfraAaaLocalUsersDelete(_EpApiV1InfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users DELETE Endpoint + + ## Description + + Endpoint to delete a local user from the ND Infra AAA service. + + ## Path + + - /api/v1/infra/aaa/localUsers/{login_id} + + ## Verb + + - DELETE + """ + + class_name: Literal["EpApiV1InfraAaaLocalUsersDelete"] = Field( + default="EpApiV1InfraAaaLocalUsersDelete", + description="Class name for backward compatibility", + frozen=True, + ) + + @property + def verb(self) -> VerbEnum: + """Return the HTTP verb for this endpoint.""" + return VerbEnum.DELETE diff --git a/plugins/module_utils/api_endpoints/mixins.py b/plugins/module_utils/api_endpoints/mixins.py new file mode 100644 index 00000000..8ff3218f --- /dev/null +++ b/plugins/module_utils/api_endpoints/mixins.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@allenrobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +Reusable mixin classes for endpoint models. + +This module provides mixin classes that can be composed to add common +fields to endpoint models without duplication. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +from typing import TYPE_CHECKING, Optional +from pydantic import BaseModel, Field + + +class LoginIdMixin(BaseModel): + """Mixin for endpoints that require login_id parameter.""" + + login_id: Optional[str] = Field(default=None, min_length=1, description="Login ID") \ No newline at end of file diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py new file mode 100644 index 00000000..120ea475 --- /dev/null +++ b/plugins/module_utils/orchestrators/base.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..models.base import NDBaseModel +from ..nd import NDModule +from ..api_endpoints.base import NDBaseSmartEndpoint +from typing import Dict, List, Any, Union, ClassVar, Type +from pydantic import BaseModel + + +ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] + + +# TODO: Revisit naming them "Orchestrator" +class NDBaseOrchestrator(BaseModel): + + model_class: ClassVar[Type[NDBaseModel]] = Type[NDBaseModel] + + # NOTE: if not defined by subclasses, return an error as they are required + post_endpoint: NDBaseSmartEndpoint + put_endpoint: NDBaseSmartEndpoint + delete_endpoint: NDBaseSmartEndpoint + get_endpoint: NDBaseSmartEndpoint + + # NOTE: Module Field is always required + # TODO: Replace it with future sender + module: NDModule + + # NOTE: Generic CRUD API operations for simple endpoints with single identifier (e.g. 
"api/v1/infra/aaa/LocalUsers/{loginID}") + # TODO: Explore how to make them even more general + def create(self, model_instance: NDBaseModel) -> ResponseType: + if self.module.check_mode: + return model_instance.model_dump() + + try: + return self.module.request(path=self.post_endpoint.base_path, method=self.post_endpoint.verb, data=model_instance.model_dump()) + except Exception as e: + raise Exception(f"Create failed for {model_instance.get_identifier_value()}: {e}") from e + + def update(self, model_instance: NDBaseModel) -> ResponseType: + if self.module.check_mode: + return model_instance.model_dump() + + try: + self.put_endpoint.set_identifiers(model_instance.get_identifier_value()) + return self.module.request(path=self.put_endpoint.path, method=self.put_endpoint.verb, data=model_instance.model_dump()) + except Exception as e: + raise Exception(f"Update failed for {self.current_identifier}: {e}") from e + + def delete(self, model_instance: NDBaseModel) -> ResponseType: + if self.module.check_mode: + return model_instance.model_dump() + + try: + self.delete_endpoint.set_identifiers(model_instance.get_identifier_value()) + return self.module.request(path=self.delete_endpoint.path, method=self.delete_endpoint.verb) + except Exception as e: + raise Exception(f"Delete failed for {self.current_identifier}: {e}") from e + + def query_one(self, model_instance: NDBaseModel) -> ResponseType: + try: + self.get_endpoint.set_identifiers(model_instance.get_identifier_value()) + return self.module.request(path=self.get_endpoint.path, method=self.get_endpoint.verb) + except Exception as e: + raise Exception(f"Query failed for {self.current_identifier}: {e}") from e + + def query_all(self) -> ResponseType: + try: + result = self.module.query_obj(self.get_endpoint.path) + return result or [] + except Exception as e: + raise Exception(f"Query all failed: {e}") from e \ No newline at end of file diff --git a/plugins/module_utils/orchestrators/local_user.py 
b/plugins/module_utils/orchestrators/local_user.py new file mode 100644 index 00000000..b156512c --- /dev/null +++ b/plugins/module_utils/orchestrators/local_user.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from .base import NDBaseOrchestrator +from ..models.local_user import LocalUserModel +from typing import Dict, List, Any, Union, Type +from ..api_endpoints.local_user import ( + EpApiV1InfraAaaLocalUsersPost, + EpApiV1InfraAaaLocalUsersPut, + EpApiV1InfraAaaLocalUsersDelete, + EpApiV1InfraAaaLocalUsersGet, +) + + +ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] + +class LocalUserOrchestrator(NDBaseOrchestrator): + + model_class = Type[LocalUserModel] + + post_endpoint = EpApiV1InfraAaaLocalUsersPost() + put_endpoint = EpApiV1InfraAaaLocalUsersPut() + delete_endpoint = EpApiV1InfraAaaLocalUsersDelete() + get_endpoint = EpApiV1InfraAaaLocalUsersGet() + + def query_all(self): + """ + Custom query_all action to extract 'localusers' from response. + """ + try: + result = self.module.query_obj(self.get_endpoint.base_path) + return result.get("localusers", []) or [] + except Exception as e: + raise Exception(f"Query all failed: {e}") from e + \ No newline at end of file From fcde8c9b213b8b36294f37406da1440fd5573d8a Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Wed, 18 Feb 2026 01:23:37 -0500 Subject: [PATCH 034/109] [ignore] Modifiy models/local_user to take full advantage of Pydantic built-in functionalities. Slightly modify models/base.py to enforce identifiers definitions in NDBaseModel subclasses. Added multiple notes to assert next steps. 
--- plugins/module_utils/models/base.py | 48 ++++- plugins/module_utils/models/local_user.py | 216 ++++++++++++++-------- 2 files changed, 183 insertions(+), 81 deletions(-) diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index a7eabf17..5a64c7a9 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -10,10 +10,11 @@ from abc import ABC, abstractmethod from pydantic import BaseModel, ConfigDict -from typing import List, Dict, Any, ClassVar, Tuple, Union, Literal +from typing import List, Dict, Any, ClassVar, Tuple, Union, Literal, Optional from typing_extensions import Self +# TODO: Revisit identifiers strategy (low priority) class NDBaseModel(BaseModel, ABC): """ Base model for all Nexus Dashboard API objects. @@ -22,8 +23,9 @@ class NDBaseModel(BaseModel, ABC): - single: One unique required field (e.g., ["login_id"]) - composite: Multiple required fields as tuple (e.g., ["device", "interface"]) - hierarchical: Priority-ordered fields (e.g., ["uuid", "name"]) + - none: no identifiers required (e.g., only a single instance can exist in Nexus Dasboard) """ - + # TODO: revisit initial Model Configurations (low priority) model_config = ConfigDict( str_strip_whitespace=True, use_enum_values=True, @@ -31,14 +33,38 @@ class NDBaseModel(BaseModel, ABC): populate_by_name=True, extra='ignore' ) - - # Subclasses MUST define these - identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Literal["single", "composite", "hierarchical"]] = "single" + + # TODO: Revisit identifiers strategy (low priority) + identifiers: ClassVar[Optional[List[str]]] = None + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "none"]]] = None # Optional: fields to exclude from diffs (e.g., passwords) exclude_from_diff: ClassVar[List[str]] = [] + + # TODO: Revisit it with identifiers strategy (low priority) + def __init_subclass__(cls, **kwargs): + """ + Enforce 
configuration for identifiers definition. + """ + super().__init_subclass__(**kwargs) + + # Skip enforcement for nested models + # TODO: Remove if `NDNestedModel` is a separated BaseModel (low priority) + if cls.__name__ in ['NDNestedModel']: + return + + if not hasattr(cls, "identifiers") or cls.identifiers is None: + raise ValueError( + f"Class {cls.__name__} must define 'identifiers' and 'identifier_strategy'." + f"Example: `identifiers: ClassVar[Optional[List[str]]] = ['login_id']`" + ) + if not hasattr(cls, "identifier_strategy") or cls.identifier_strategy is None: + raise ValueError( + f"Class {cls.__name__} must define 'identifiers' and 'identifier_strategy'." + f"Example: `identifier_strategy: ClassVar[Optional[Literal['single', 'composite', 'hierarchical', 'none']]] = 'single'`" + ) + # NOTE: Might not need to make them absractmethod because of the Pydantic built-in methods (low priority) @abstractmethod def to_payload(self) -> Dict[str, Any]: """ @@ -54,6 +80,8 @@ def from_response(cls, response: Dict[str, Any]) -> Self: """ pass + # TODO: Revisit this function when revisiting identifier strategy (low priority) + # TODO: Add condition when there is no identifiers (high priority) def get_identifier_value(self) -> Union[str, int, Tuple[Any, ...]]: """ Extract identifier value(s) from this instance: @@ -82,7 +110,7 @@ def get_identifier_value(self) -> Union[str, int, Tuple[Any, ...]]: missing.append(field) values.append(value) - # NOTE: might not be needed in the future with field_validator + # NOTE: might be redefined with Pydantic (low priority) if missing: raise ValueError( f"Composite identifier fields {missing} are None. " @@ -104,6 +132,7 @@ def get_identifier_value(self) -> Union[str, int, Tuple[Any, ...]]: else: raise ValueError(f"Unknown identifier strategy: {self.identifier_strategy}") + def to_diff_dict(self) -> Dict[str, Any]: """ Export for diff comparison (excludes sensitive fields). 
@@ -114,12 +143,13 @@ def to_diff_dict(self) -> Dict[str, Any]: exclude=set(self.exclude_from_diff) ) -# NOTE: Maybe make it a seperate BaseModel +# TODO: Make it a seperated BaseModel (low priority) class NDNestedModel(NDBaseModel): """ Base for nested models without identifiers. """ + # TODO: Configuration Fields to be clearly defined here (low priority) identifiers: ClassVar[List[str]] = [] def to_payload(self) -> Dict[str, Any]: @@ -133,4 +163,4 @@ def from_response(cls, response: Dict[str, Any]) -> Self: """ Create model instance from API response. """ - return cls.model_validate(response) + return cls.model_validate(response, by_alias=True) diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index b7069126..4be05991 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -8,15 +8,15 @@ __metaclass__ = type -from pydantic import Field, SecretStr +from pydantic import Field, SecretStr, model_serializer, field_serializer, field_validator, model_validator, computed_field from types import MappingProxyType from typing import List, Dict, Any, Optional, ClassVar, Literal from typing_extensions import Self # TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel, NDNestedModel -from models.base import NDBaseModel, NDNestedModel +from .base import NDBaseModel, NDNestedModel -# TODO: Move it to constants.py and import it +# TODO: Move it to constants.py and make a reverse class Map for this USER_ROLES_MAPPING = MappingProxyType({ "fabric_admin": "fabric-admin", "observer": "observer", @@ -31,11 +31,13 @@ class LocalUserSecurityDomainModel(NDNestedModel): """Security domain configuration for local user (nested model).""" # Fields - name: str - roles: Optional[List[str]] = None - - def to_payload(self) -> Dict[str, Any]: + name: str = Field(..., alias="name", exclude=True) + roles: Optional[List[str]] = 
Field(default=None, alias="roles", exclude=True) + + # -- Serialization (Model instance -> API payload) -- + @model_serializer() + def serialize_model(self) -> Dict: return { self.name: { "roles": [ @@ -44,22 +46,12 @@ def to_payload(self) -> Dict[str, Any]: ] } } - - @classmethod - def from_response(cls, name: str, domain_config: Dict[str, Any]) -> Self: - # NOTE: Maybe create a function from it to be moved to utils.py and to be imported - reverse_mapping = {value: key for key, value in USER_ROLES_MAPPING.items()} - - return cls( - name=name, - roles=[ - reverse_mapping.get(role, role) - for role in domain_config.get("roles", []) - ] - ) + # -- Deserialization (API response / Ansible payload -> Model instance) -- + # NOTE: Not needed as it already defined in `LocalUserModel` -> investigate if needed +# TODO: Add field validation (e.g. me, le, choices, etc...) (medium priority) class LocalUserModel(NDBaseModel): """ Local user configuration. @@ -68,73 +60,153 @@ class LocalUserModel(NDBaseModel): """ # Identifier configuration - identifiers: ClassVar[List[str]] = ["login_id"] - identifier_strategy: ClassVar[Literal["single", "composite", "hierarchical"]] = "single" + # TODO: Revisit this identifiers strategy (low priority) + identifiers: ClassVar[Optional[List[str]]] = ["login_id"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "none"]]] = "single" + + # Keys management configurations + # TODO: Revisit these configurations (low priority) exclude_from_diff: ClassVar[List[str]] = ["user_password"] + unwanted_keys: ClassVar[List[List[str]]]= [ + ["passwordPolicy", "passwordChangeTime"], # Nested path + ["userID"] # Simple key + ] # Fields + # NOTE: `alias` are NOT the ansible aliases. 
they are the equivalent attribute's names from the API spec login_id: str = Field(..., alias="loginID") - email: Optional[str] = None + email: Optional[str] = Field(default=None, alias="email") first_name: Optional[str] = Field(default=None, alias="firstName") last_name: Optional[str] = Field(default=None, alias="lastName") user_password: Optional[SecretStr] = Field(default=None, alias="password") - reuse_limitation: Optional[int] = Field(default=None, alias="reuseLimitation") - time_interval_limitation: Optional[int] = Field(default=None, alias="timeIntervalLimitation") - security_domains: Optional[List[LocalUserSecurityDomainModel]] = Field(default=None, alias="domains") + reuse_limitation: Optional[int] = Field(default=None, alias="reuseLimitation", exclude=True) + time_interval_limitation: Optional[int] = Field(default=None, alias="timeIntervalLimitation", exclude=True) + security_domains: Optional[List[LocalUserSecurityDomainModel]] = Field(default=None, alias="rbac") remote_id_claim: Optional[str] = Field(default=None, alias="remoteIDClaim") remote_user_authorization: Optional[bool] = Field(default=None, alias="xLaunch") - + + # -- Serialization (Model instance -> API payload) -- + + @computed_field(alias="passwordPolicy") + @property + def password_policy(self) -> Optional[Dict[str, int]]: + """Computed nested structure for API payload.""" + if self.reuse_limitation is None and self.time_interval_limitation is None: + return None + + policy = {} + if self.reuse_limitation is not None: + policy["reuseLimitation"] = self.reuse_limitation + if self.time_interval_limitation is not None: + policy["timeIntervalLimitation"] = self.time_interval_limitation + return policy + + @field_serializer("user_password") + def serialize_password(self, value: Optional[SecretStr]) -> Optional[str]: + return value.get_secret_value() if value else None + + + @field_serializer("security_domains") + def serialize_domains(self, value: Optional[List[LocalUserSecurityDomainModel]]) -> 
Optional[Dict]: + # NOTE: exclude `None` values and empty list (-> should we exclude empty list?) + if not value: + return None + + domains_dict = {} + for domain in value: + domains_dict.update(domain.to_payload()) + + return { + "domains": domains_dict + } + + def to_payload(self) -> Dict[str, Any]: - payload = self.model_dump( - by_alias=True, - exclude={ - 'domains', - 'security_domains', - 'reuseLimitation', - 'reuse_limitation', - 'timeIntervalLimitation', - 'time_interval_limitation' - }, - exclude_none=True - ) + return self.model_dump(by_alias=True, exclude_none=True) - if self.user_password: - payload["password"] = self.user_password.get_secret_value() + # -- Deserialization (API response / Ansible payload -> Model instance) -- - if self.security_domains: - payload["rbac"] = {"domains": {}} - for domain in self.security_domains: - payload["rbac"]["domains"].update(domain.to_payload()) + @model_validator(mode="before") + @classmethod + def deserialize_password_policy(cls, data: Any) -> Any: + if not isinstance(data, dict): + return data - if self.reuse_limitation is not None or self.time_interval_limitation is not None: - payload["passwordPolicy"] = {} - if self.reuse_limitation is not None: - payload["passwordPolicy"]["reuseLimitation"] = self.reuse_limitation - if self.time_interval_limitation is not None: - payload["passwordPolicy"]["timeIntervalLimitation"] = self.time_interval_limitation + password_policy = data.get("passwordPolicy") - return payload - + if password_policy and isinstance(password_policy, dict): + if "reuseLimitation" in password_policy: + data["reuse_limitation"] = password_policy["reuseLimitation"] + if "timeIntervalLimitation" in password_policy: + data["time_interval_limitation"] = password_policy["timeIntervalLimitation"] + + # Remove the nested structure from data to avoid conflicts + # (since it's a computed field, not a real field) + data.pop("passwordPolicy", None) + + return data + + @field_validator("security_domains", 
mode="before") @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: - password_policy = response.get("passwordPolicy", {}) - rbac = response.get("rbac", {}) - domains = rbac.get("domains", {}) + def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: + if value is None: + return None + + # If already in list format (Ansible module representation), return as-is + if isinstance(value, list): + return value + + # If in the nested dict format (API representation) + if isinstance(value, dict) and "domains" in value: + domains_dict = value["domains"] + domains_list = [] + + for domain_name, domain_data in domains_dict.items(): + domains_list.append({ + "name": domain_name, + "roles": [USER_ROLES_MAPPING.get(role, role) for role in domain_data.get("roles", [])] + }) + + return domains_list - security_domains = [ - LocalUserSecurityDomainModel.from_response(name, config) - for name, config in domains.items() - ] if domains else None + return value + + # TODO: only works for api responses but NOT for Ansible configs -> needs to be fixed (high priority) + @classmethod + def from_response(cls, response: Dict[str, Any]) -> Self: + return cls.model_validate(response, by_alias=True) - return cls( - login_id=response.get("loginID"), - email=response.get("email"), - first_name=response.get("firstName"), - last_name=response.get("lastName"), - user_password=response.get("password"), - reuse_limitation=password_policy.get("reuseLimitation"), - time_interval_limitation=password_policy.get("timeIntervalLimitation"), - security_domains=security_domains, - remote_id_claim=response.get("remoteIDClaim"), - remote_user_authorization=response.get("xLaunch") + + # -- Extra -- + + # TODO: to generate from Fields (low priority) + def get_argument_spec(self): + return dict( + config=dict( + type="list", + elements="dict", + required=True, + options=dict( + email=dict(type="str"), + login_id=dict(type="str", required=True), + first_name=dict(type="str"), + 
last_name=dict(type="str"), + user_password=dict(type="str", no_log=True), + reuse_limitation=dict(type="int"), + time_interval_limitation=dict(type="int"), + security_domains=dict( + type="list", + elements="dict", + options=dict( + name=dict(type="str", required=True, aliases=["security_domain_name", "domain_name"]), + roles=dict(type="list", elements="str", choices=list(USER_ROLES_MAPPING)), + ), + aliases=["domains"], + ), + remote_id_claim=dict(type="str"), + remote_user_authorization=dict(type="bool"), + ), + ), + override_exceptions=dict(type="list", elements="str"), + state=dict(type="str", default="merged", choices=["merged", "replaced", "overridden", "deleted"]), ) From 5cf2a4863fc5d220ef72ba2da3da5bb437e74461 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 24 Feb 2026 12:57:37 -0500 Subject: [PATCH 035/109] [ignore] Adapt the Network Resource Module architecture for ND to smart endpoints and Pydantic models modification (works for merge and replace states). Add comments for next steps. 
--- plugins/module_utils/api_endpoints/base.py | 5 +- .../module_utils/api_endpoints/local_user.py | 1 + plugins/module_utils/models/base.py | 25 ++- plugins/module_utils/models/local_user.py | 12 +- plugins/module_utils/nd_config_collection.py | 76 ++------ plugins/module_utils/nd_network_resources.py | 163 ++++++------------ plugins/module_utils/orchestrators/base.py | 27 +-- .../module_utils/orchestrators/local_user.py | 12 +- plugins/module_utils/utils.py | 26 ++- plugins/modules/nd_local_user.py | 63 +------ 10 files changed, 159 insertions(+), 251 deletions(-) diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py index 1a9cd768..747c3283 100644 --- a/plugins/module_utils/api_endpoints/base.py +++ b/plugins/module_utils/api_endpoints/base.py @@ -15,11 +15,14 @@ IdentifierKey = Union[str, int, Tuple[Any, ...], None] +# TODO: Rename it to APIEndpoint +# NOTE: This is a very minimalist endpoint package -> needs to be enhanced class NDBaseSmartEndpoint(BaseModel, ABC): # TODO: maybe to be modified in the future model_config = ConfigDict(validate_assignment=True) + # TODO: to remove base_path: str @abstractmethod @@ -34,7 +37,7 @@ def verb(self) -> str: # TODO: Maybe to be modifed to be more Pydantic # TODO: Maybe change function's name - # NOTE: function to set mixins fields from identifiers + # NOTE: function to set endpoints attribute fields from identifiers @abstractmethod def set_identifiers(self, identifier: IdentifierKey = None): pass diff --git a/plugins/module_utils/api_endpoints/local_user.py b/plugins/module_utils/api_endpoints/local_user.py index de493e40..61f52ad8 100644 --- a/plugins/module_utils/api_endpoints/local_user.py +++ b/plugins/module_utils/api_endpoints/local_user.py @@ -31,6 +31,7 @@ class _EpApiV1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseSmartEndpoint): /api/v1/infra/aaa/localUsers endpoint. 
""" + # TODO: Remove it base_path: Final = NDBasePath.nd_infra_aaa("localUsers") @property diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 5a64c7a9..db7fd9ae 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -40,6 +40,7 @@ class NDBaseModel(BaseModel, ABC): # Optional: fields to exclude from diffs (e.g., passwords) exclude_from_diff: ClassVar[List[str]] = [] + unwanted_keys: ClassVar[List] = [] # TODO: Revisit it with identifiers strategy (low priority) def __init_subclass__(cls, **kwargs): @@ -65,8 +66,9 @@ def __init_subclass__(cls, **kwargs): ) # NOTE: Might not need to make them absractmethod because of the Pydantic built-in methods (low priority) + # NOTE: Should we use keyword arguments? @abstractmethod - def to_payload(self) -> Dict[str, Any]: + def to_payload(self, **kwargs) -> Dict[str, Any]: """ Convert model to API payload format. """ @@ -74,7 +76,7 @@ def to_payload(self) -> Dict[str, Any]: @classmethod @abstractmethod - def from_response(cls, response: Dict[str, Any]) -> Self: + def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: """ Create model instance from API response. """ @@ -142,6 +144,25 @@ def to_diff_dict(self) -> Dict[str, Any]: exclude_none=True, exclude=set(self.exclude_from_diff) ) + + # NOTE: initialize and return a deep copy of the instance? + # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? -> similar to NDCOnfigCollection... 
+ def merge(self, other_model: "NDBaseModel") -> Self: + if not isinstance(other_model, type(self)): + # TODO: Change error message + return TypeError("models are not of the same type.") + + for field, value in other_model: + if value is None: + continue + + current_value = getattr(self, field) + if isinstance(current_value, NDBaseModel) and isinstance(value, NDBaseModel): + setattr(self, field, current_value.merge(value)) + + else: + setattr(self, field, value) + return self # TODO: Make it a seperated BaseModel (low priority) class NDNestedModel(NDBaseModel): diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 4be05991..ea511097 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -67,14 +67,14 @@ class LocalUserModel(NDBaseModel): # Keys management configurations # TODO: Revisit these configurations (low priority) exclude_from_diff: ClassVar[List[str]] = ["user_password"] - unwanted_keys: ClassVar[List[List[str]]]= [ + unwanted_keys: ClassVar[List]= [ ["passwordPolicy", "passwordChangeTime"], # Nested path ["userID"] # Simple key ] # Fields # NOTE: `alias` are NOT the ansible aliases. 
they are the equivalent attribute's names from the API spec - login_id: str = Field(..., alias="loginID") + login_id: str = Field(alias="loginID") email: Optional[str] = Field(default=None, alias="email") first_name: Optional[str] = Field(default=None, alias="firstName") last_name: Optional[str] = Field(default=None, alias="lastName") @@ -121,8 +121,8 @@ def serialize_domains(self, value: Optional[List[LocalUserSecurityDomainModel]]) } - def to_payload(self) -> Dict[str, Any]: - return self.model_dump(by_alias=True, exclude_none=True) + def to_payload(self, **kwargs) -> Dict[str, Any]: + return self.model_dump(by_alias=True, exclude_none=True, **kwargs) # -- Deserialization (API response / Ansible payload -> Model instance) -- @@ -173,8 +173,8 @@ def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: # TODO: only works for api responses but NOT for Ansible configs -> needs to be fixed (high priority) @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: - return cls.model_validate(response, by_alias=True) + def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: + return cls.model_validate(response, by_alias=True, **kwargs) # -- Extra -- diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 2f256d30..a25287aa 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -12,24 +12,26 @@ from copy import deepcopy # TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from models.base import NDBaseModel +from .models.base import NDBaseModel +from .utils import issubset # Type aliases # NOTE: Maybe add more type aliases in the future if needed ModelType = TypeVar('ModelType', bound=NDBaseModel) +# TODO: Defined the same acros multiple files -> maybe move to constants.py IdentifierKey = Union[str, int, Tuple[Any, ...]] - +# TODO:Might make it a Pydantic RootModel 
(low priority but medium impact on NDNetworkResourceModule) class NDConfigCollection(Generic[ModelType]): """ Nexus Dashboard configuration collection for NDBaseModel instances. """ - def __init__(self, model_class: type[ModelType], items: Optional[List[ModelType]] = None): + def __init__(self, model_class: ModelType, items: Optional[List[ModelType]] = None): """ Initialize collection. """ - self._model_class = model_class + self._model_class: ModelType = model_class # Dual storage self._items: List[ModelType] = [] @@ -39,6 +41,7 @@ def __init__(self, model_class: type[ModelType], items: Optional[List[ModelType] for item in items: self.add(item) + # TODO: might not be necessary def _extract_key(self, item: ModelType) -> IdentifierKey: """ Extract identifier key from item. @@ -48,6 +51,7 @@ def _extract_key(self, item: ModelType) -> IdentifierKey: except Exception as e: raise ValueError(f"Failed to extract identifier: {e}") from e + # TODO: optimize it -> only needed for delete method (low priority) def _rebuild_index(self) -> None: """Rebuild index from scratch (O(n) operation).""" self._index.clear() @@ -105,8 +109,8 @@ def replace(self, item: ModelType) -> bool: self._items[index] = item return True - - def merge(self, item: ModelType, custom_merge_function: Optional[Callable[[ModelType, ModelType], ModelType]] = None) -> ModelType: + + def merge(self, item: ModelType) -> ModelType: """ Merge item with existing, or add if not present. 
""" @@ -116,35 +120,11 @@ def merge(self, item: ModelType, custom_merge_function: Optional[Callable[[Model if existing is None: self.add(item) return item - - # Custom or default merge - if custom_merge_function: - merged = custom_merge_function(existing, item) else: - # Default merge - existing_data = existing.model_dump() - new_data = item.model_dump(exclude_unset=True) - merged_data = self._deep_merge(existing_data, new_data) - merged = self._model_class.model_validate(merged_data) - + merged = existing.merge(item) self.replace(merged) return merged - - def _deep_merge(self, base: Dict, update: Dict) -> Dict: - """Recursively merge dictionaries.""" - result = base.copy() - - for key, value in update.items(): - if value is None: - continue - - if key in result and isinstance(result[key], dict) and isinstance(value, dict): - result[key] = self._deep_merge(result[key], value) - else: - result[key] = value - - return result - + def delete(self, key: IdentifierKey) -> bool: """ Delete item by identifier (O(n) operation due to index rebuild) @@ -161,6 +141,7 @@ def delete(self, key: IdentifierKey) -> bool: # Diff Operations + # NOTE: Maybe add a similar one in the NDBaseModel (-> but is it necessary?) def get_diff_config(self, new_item: ModelType, unwanted_keys: Optional[List[Union[str, List[str]]]] = None) -> Literal["new", "no_diff", "changed"]: """ Compare single item against collection. 
@@ -182,7 +163,7 @@ def get_diff_config(self, new_item: ModelType, unwanted_keys: Optional[List[Unio existing_data = self._remove_unwanted_keys(existing_data, unwanted_keys) new_data = self._remove_unwanted_keys(new_data, unwanted_keys) - is_subset = self._issubset(new_data, existing_data) + is_subset = issubset(new_data, existing_data) return "no_diff" if is_subset else "changed" @@ -214,28 +195,7 @@ def get_diff_identifiers(self, other: "NDConfigCollection[ModelType]") -> List[I other_keys = set(other.keys()) return list(current_keys - other_keys) - def _issubset(self, subset: Any, superset: Any) -> bool: - """Check if subset is contained in superset.""" - if type(subset) is not type(superset): - return False - - if not isinstance(subset, dict): - if isinstance(subset, list): - return all(item in superset for item in subset) - return subset == superset - - for key, value in subset.items(): - if value is None: - continue - - if key not in superset: - return False - - if not self._issubset(value, superset[key]): - return False - - return True - + # TODO: Maybe not necessary def _remove_unwanted_keys(self, data: Dict, unwanted_keys: List[Union[str, List[str]]]) -> Dict: """Remove unwanted keys from dict (supports nested paths).""" data = deepcopy(data) @@ -282,8 +242,8 @@ def copy(self) -> "NDConfigCollection[ModelType]": items=deepcopy(self._items) ) - # Serialization - + # Collection Serialization + def to_list(self, **kwargs) -> List[Dict]: """ Export as list of dicts (with aliases). @@ -301,7 +261,7 @@ def from_list(cls, data: List[Dict], model_class: type[ModelType]) -> "NDConfigC """ Create collection from list of dicts. 
""" - items = [model_class.model_validate(item_data) for item_data in data] + items = [model_class.model_validate(item_data, by_name=True) for item_data in data] return cls(model_class=model_class, items=items) @classmethod diff --git a/plugins/module_utils/nd_network_resources.py b/plugins/module_utils/nd_network_resources.py index ab7df9e2..d52fb9de 100644 --- a/plugins/module_utils/nd_network_resources.py +++ b/plugins/module_utils/nd_network_resources.py @@ -9,8 +9,9 @@ __metaclass__ = type from copy import deepcopy -from typing import Optional, List, Dict, Any, Callable, Literal +from typing import Optional, List, Dict, Any, Literal, Type from pydantic import ValidationError +from ansible.module_utils.basic import AnsibleModule # TODO: To be replaced with: # from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule @@ -20,36 +21,48 @@ from nd import NDModule from nd_config_collection import NDConfigCollection from models.base import NDBaseModel +from .orchestrators.base import NDBaseOrchestrator from constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED - +# TODO: replace path and verbs with smart Endpoint (Top priority) +# TODO: Rename it (low priority) +# TODO: Revisit Deserialization in every method (high priority) class NDNetworkResourceModule(NDModule): """ Generic Network Resource Module for Nexus Dashboard. """ - def __init__(self, module, path: str, model_class: type[NDBaseModel], actions_overwrite_map: Optional[Dict[str, Callable]] = None): + def __init__(self, module: AnsibleModule, model_class: Type[NDBaseModel], model_orchestrator: Type[NDBaseOrchestrator]): """ Initialize the Network Resource Module. """ + # TODO: Revisit Module initialization and configuration (medium priority). e.g., use instead: + # nd_module = NDModule() super().__init__(module) # Configuration - self.path = path + # TODO: make sure `model_class` is the same as the one in `model_orchestrator`. 
if not, error out (high priority) self.model_class = model_class - self.actions_overwrite_map = actions_overwrite_map or {} + self.model_orchestrator = model_orchestrator(module=module) + # TODO: Revisit these class variables when udpating Module intialization and configuration (medium priority) + self.state = self.params["state"] + self.ansible_config = self.params["config"] + # Initialize collections + # TODO: Revisit collections initialization especially `init_all_data` (medium priority) + # TODO: Revisit class variables `previous`, `existing`, etc... (medium priority) + self.nd_config_collection = NDConfigCollection[model_class] try: - init_all_data = self._query_all() + init_all_data = self.model_orchestrator.query_all() - self.existing = NDConfigCollection.from_api_response( + self.existing = self.nd_config_collection.from_api_response( response_data=init_all_data, model_class=model_class ) - self.previous = NDConfigCollection(model_class=model_class) - self.proposed = NDConfigCollection(model_class=model_class) - self.sent = NDConfigCollection(model_class=model_class) + self.previous = self.nd_config_collection(model_class=model_class) + self.proposed = self.nd_config_collection(model_class=model_class) + self.sent = self.nd_config_collection(model_class=model_class) except Exception as e: self.fail_json( @@ -59,83 +72,10 @@ def __init__(self, module, path: str, model_class: type[NDBaseModel], actions_ov # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] - - # Current operation context - self.current_identifier = None - self.existing_config: Dict[str, Any] = {} - self.proposed_config: Dict[str, Any] = {} - - # Action Decorator - - @staticmethod - def actions_overwrite(action: str): - """ - Decorator to allow overriding default action operations. 
- """ - def decorator(func): - def wrapper(self, *args, **kwargs): - overwrite_action = self.actions_overwrite_map.get(action) - if callable(overwrite_action): - return overwrite_action(self, *args, **kwargs) - else: - return func(self, *args, **kwargs) - return wrapper - return decorator - - # Action Operations - - @actions_overwrite("create") - def _create(self) -> Optional[Dict[str, Any]]: - """ - Create a new configuration object. - """ - if self.module.check_mode: - return self.proposed_config - - try: - return self.request(path=self.path, method="POST", data=self.proposed_config) - except Exception as e: - raise Exception(f"Create failed for {self.current_identifier}: {e}") from e - - @actions_overwrite("update") - def _update(self) -> Optional[Dict[str, Any]]: - """ - Update an existing configuration object. - """ - if self.module.check_mode: - return self.proposed_config - - try: - object_path = f"{self.path}/{self.current_identifier}" - return self.request(path=object_path, method="PUT", data=self.proposed_config) - except Exception as e: - raise Exception(f"Update failed for {self.current_identifier}: {e}") from e - - @actions_overwrite("delete") - def _delete(self) -> None: - """Delete a configuration object.""" - if self.module.check_mode: - return - - try: - object_path = f"{self.path}/{self.current_identifier}" - self.request(path=object_path, method="DELETE") - except Exception as e: - raise Exception(f"Delete failed for {self.current_identifier}: {e}") from e - - @actions_overwrite("query_all") - def _query_all(self) -> List[Dict[str, Any]]: - """ - Query all configuration objects from device. 
- """ - try: - result = self.query_obj(self.path) - return result or [] - except Exception as e: - raise Exception(f"Query all failed: {e}") from e - + # Logging - + # NOTE: format log placeholder + # TODO: use a proper logger (low priority) def format_log(self, identifier, status: Literal["created", "updated", "deleted", "no_change"], after_data: Optional[Dict[str, Any]] = None, sent_payload_data: Optional[Dict[str, Any]] = None) -> None: """ Create and append a log entry. @@ -159,20 +99,20 @@ def format_log(self, identifier, status: Literal["created", "updated", "deleted" self.nd_logs.append(log_entry) - # State Management - - def manage_state( - self, state: Literal["merged", "replaced", "overridden", "deleted"], new_configs: List[Dict[str, Any]], unwanted_keys: Optional[List] = None, override_exceptions: Optional[List] = None) -> None: + # State Management (core function) + # TODO: adapt all `manage` functions to endpoint/orchestrator strategies (Top priority) + def manage_state(self) -> None: """ Manage state according to desired configuration. 
""" unwanted_keys = unwanted_keys or [] - override_exceptions = override_exceptions or [] # Parse and validate configs + # TODO: move it to init() (top priority) + # TODO: Modify it if NDConfigCollection becomes a Pydantic RootModel (low priority) try: parsed_items = [] - for config in new_configs: + for config in self.ansible_config: try: # Parse config into model item = self.model_class.model_validate(config) @@ -186,7 +126,7 @@ def manage_state( return # Create proposed collection - self.proposed = NDConfigCollection( + self.proposed = self.nd_config_collection( model_class=self.model_class, items=parsed_items ) @@ -202,27 +142,29 @@ def manage_state( return # Execute state operations - if state in ["merged", "replaced", "overridden"]: - self._manage_create_update_state(state, unwanted_keys) + if self.state in ["merged", "replaced", "overridden"]: + self._manage_create_update_state() - if state == "overridden": - self._manage_override_deletions(override_exceptions) + if self.state == "overridden": + self._manage_override_deletions() - elif state == "deleted": + elif self.state == "deleted": self._manage_delete_state() + # TODO: not needed with Ansible `argument_spec` validation. Keep it for now but needs to be removed (low priority) else: - self.fail_json(msg=f"Invalid state: {state}") + self.fail_json(msg=f"Invalid state: {self.state}") - def _manage_create_update_state(self,state: Literal["merged", "replaced", "overridden"], unwanted_keys: List) -> None: + + def _manage_create_update_state(self) -> None: """ Handle merged/replaced/overridden states. 
""" for proposed_item in self.proposed: try: # Extract identifier + # TODO: Remove self.current_identifier, get it directly into the action functions identifier = proposed_item.get_identifier_value() - self.current_identifier = identifier existing_item = self.existing.get(identifier) self.existing_config = ( @@ -232,10 +174,7 @@ def _manage_create_update_state(self,state: Literal["merged", "replaced", "overr ) # Determine diff status - diff_status = self.existing.get_diff_config( - proposed_item, - unwanted_keys=unwanted_keys - ) + diff_status = self.existing.get_diff_config(proposed_item) # No changes needed if diff_status == "no_diff": @@ -247,7 +186,7 @@ def _manage_create_update_state(self,state: Literal["merged", "replaced", "overr continue # Prepare final config based on state - if state == "merged" and existing_item: + if self.state == "merged" and existing_item: # Merge with existing merged_item = self.existing.merge(proposed_item) final_item = merged_item @@ -264,16 +203,16 @@ def _manage_create_update_state(self,state: Literal["merged", "replaced", "overr # Execute API operation if diff_status == "changed": - response = self._update() + response = self.model_orchestrator.update(final_item) operation_status = "updated" else: - response = self._create() + response = self.model_orchestrator.create(final_item) operation_status = "created" # Track sent payload if not self.module.check_mode: self.sent.add(final_item) - sent_payload = self.proposed_config + sent_payload = final_item else: sent_payload = None @@ -297,7 +236,7 @@ def _manage_create_update_state(self,state: Literal["merged", "replaced", "overr after_data=self.existing_config ) - if not self.module.params.get("ignore_errors", False): + if not self.params.get("ignore_errors", False): self.fail_json( msg=error_msg, identifier=str(identifier), @@ -305,6 +244,7 @@ def _manage_create_update_state(self,state: Literal["merged", "replaced", "overr ) return + # TODO: Refactor with orchestrator (Top priority) 
def _manage_override_deletions(self, override_exceptions: List) -> None: """ Delete items not in proposed config (for overridden state). @@ -351,6 +291,7 @@ def _manage_override_deletions(self, override_exceptions: List) -> None: ) return + # TODO: Refactor with orchestrator (Top priority) def _manage_delete_state(self) -> None: """Handle deleted state.""" for proposed_item in self.proposed: @@ -398,7 +339,7 @@ def _manage_delete_state(self) -> None: return # Output Formatting - + # TODO: move to separate Class (results) -> align it with rest_send PR def add_logs_and_outputs(self) -> None: """Add logs and outputs to module result based on output_level.""" output_level = self.params.get("output_level", "normal") diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 120ea475..e2d9fa75 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -24,39 +24,43 @@ class NDBaseOrchestrator(BaseModel): model_class: ClassVar[Type[NDBaseModel]] = Type[NDBaseModel] # NOTE: if not defined by subclasses, return an error as they are required - post_endpoint: NDBaseSmartEndpoint - put_endpoint: NDBaseSmartEndpoint - delete_endpoint: NDBaseSmartEndpoint - get_endpoint: NDBaseSmartEndpoint + # TODO: change name from http method to crud (e.g. post -> create) + post_endpoint: Type[NDBaseSmartEndpoint] + put_endpoint: Type[NDBaseSmartEndpoint] + delete_endpoint: Type[NDBaseSmartEndpoint] + get_endpoint: Type[NDBaseSmartEndpoint] # NOTE: Module Field is always required # TODO: Replace it with future sender module: NDModule # NOTE: Generic CRUD API operations for simple endpoints with single identifier (e.g. 
"api/v1/infra/aaa/LocalUsers/{loginID}") - # TODO: Explore how to make them even more general + # TODO: Explore new ways to make them even more general + # TODO: Revisit Deserialization def create(self, model_instance: NDBaseModel) -> ResponseType: if self.module.check_mode: - return model_instance.model_dump() + return model_instance.to_payload() try: - return self.module.request(path=self.post_endpoint.base_path, method=self.post_endpoint.verb, data=model_instance.model_dump()) + api_endpoint = self.post_endpoint() + return self.module.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) except Exception as e: raise Exception(f"Create failed for {model_instance.get_identifier_value()}: {e}") from e + # TODO: Make the same changes as create() with local api_endpoint variable def update(self, model_instance: NDBaseModel) -> ResponseType: if self.module.check_mode: - return model_instance.model_dump() + return model_instance.to_payload() try: self.put_endpoint.set_identifiers(model_instance.get_identifier_value()) - return self.module.request(path=self.put_endpoint.path, method=self.put_endpoint.verb, data=model_instance.model_dump()) + return self.module.request(path=self.put_endpoint.path, method=self.put_endpoint.verb, data=model_instance.to_payload()) except Exception as e: raise Exception(f"Update failed for {self.current_identifier}: {e}") from e def delete(self, model_instance: NDBaseModel) -> ResponseType: if self.module.check_mode: - return model_instance.model_dump() + return model_instance.to_payload() try: self.delete_endpoint.set_identifiers(model_instance.get_identifier_value()) @@ -71,7 +75,8 @@ def query_one(self, model_instance: NDBaseModel) -> ResponseType: except Exception as e: raise Exception(f"Query failed for {self.current_identifier}: {e}") from e - def query_all(self) -> ResponseType: + # TODO: Revisit the straegy around the query_all (see local_user's case) + def query_all(self, model_instance: 
NDBaseModel, **kwargs) -> ResponseType: try: result = self.module.query_obj(self.get_endpoint.path) return result or [] diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index b156512c..3810fa83 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -9,8 +9,10 @@ __metaclass__ = type from .base import NDBaseOrchestrator +from ..models.base import NDBaseModel from ..models.local_user import LocalUserModel from typing import Dict, List, Any, Union, Type +from ..api_endpoints.base import NDBaseSmartEndpoint from ..api_endpoints.local_user import ( EpApiV1InfraAaaLocalUsersPost, EpApiV1InfraAaaLocalUsersPut, @@ -23,12 +25,12 @@ class LocalUserOrchestrator(NDBaseOrchestrator): - model_class = Type[LocalUserModel] + model_class: Type[NDBaseModel] = LocalUserModel - post_endpoint = EpApiV1InfraAaaLocalUsersPost() - put_endpoint = EpApiV1InfraAaaLocalUsersPut() - delete_endpoint = EpApiV1InfraAaaLocalUsersDelete() - get_endpoint = EpApiV1InfraAaaLocalUsersGet() + post_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPost + put_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPut + delete_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersDelete + get_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet def query_all(self): """ diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 5bf0a0f0..72ccbcd7 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -9,6 +9,7 @@ __metaclass__ = type from copy import deepcopy +from typing import Any def sanitize_dict(dict_to_sanitize, keys=None, values=None, recursive=True, remove_none_values=True): @@ -29,4 +30,27 @@ def sanitize_dict(dict_to_sanitize, keys=None, values=None, recursive=True, remo for index, item in enumerate(v): if isinstance(item, dict): result[k][index] = sanitize_dict(item, keys, 
values) - return result \ No newline at end of file + return result + + +def issubset(subset: Any, superset: Any) -> bool: + """Check if subset is contained in superset.""" + if type(subset) is not type(superset): + return False + + if not isinstance(subset, dict): + if isinstance(subset, list): + return all(item in superset for item in subset) + return subset == superset + + for key, value in subset.items(): + if value is None: + continue + + if key not in superset: + return False + + if not issubset(value, superset[key]): + return False + + return True diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 3dcaf1a4..901549fb 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -180,53 +180,15 @@ # from ansible_collections.cisco.nd.plugins.module_utils.nd_network_resource_module import NDNetworkResourceModule # from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel # from ansible_collections.cisco.nd.plugins.module_utils.constants import USER_ROLES_MAPPING -from module_utils.nd import nd_argument_spec -from module_utils.nd_network_resources import NDNetworkResourceModule -from module_utils.models.local_user import LocalUserModel -from module_utils.constants import USER_ROLES_MAPPING +from ..module_utils.nd import nd_argument_spec +from ..module_utils.nd_network_resources import NDNetworkResourceModule +from ..module_utils.models.local_user import LocalUserModel +from ..module_utils.orchestrators.local_user import LocalUserOrchestrator -# NOTE: Maybe Add the overwrite action in the LocalUserModel -def query_all_local_users(nd_module): - """ - Custom query_all action to extract 'localusers' from response. 
- """ - response = nd_module.query_obj(nd_module.path) - return response.get("localusers", []) - - -# NOTE: Maybe Add More aliases like in the LocalUserModel / Revisit the argmument_spec def main(): argument_spec = nd_argument_spec() - argument_spec.update( - config=dict( - type="list", - elements="dict", - required=True, - options=dict( - email=dict(type="str"), - login_id=dict(type="str", required=True), - first_name=dict(type="str"), - last_name=dict(type="str"), - user_password=dict(type="str", no_log=True), - reuse_limitation=dict(type="int"), - time_interval_limitation=dict(type="int"), - security_domains=dict( - type="list", - elements="dict", - options=dict( - name=dict(type="str", required=True, aliases=["security_domain_name", "domain_name"]), - roles=dict(type="list", elements="str", choices=list(USER_ROLES_MAPPING)), - ), - aliases=["domains"], - ), - remote_id_claim=dict(type="str"), - remote_user_authorization=dict(type="bool"), - ), - ), - override_exceptions=dict(type="list", elements="str"), - state=dict(type="str", default="merged", choices=["merged", "replaced", "overridden", "deleted"]), - ) + argument_spec.update(LocalUserModel.get_argument_spec()) module = AnsibleModule( argument_spec=argument_spec, @@ -237,23 +199,12 @@ def main(): # Create NDNetworkResourceModule with LocalUserModel nd_module = NDNetworkResourceModule( module=module, - path="/api/v1/infra/aaa/localUsers", model_class=LocalUserModel, - actions_overwrite_map={ - "query_all": query_all_local_users - } + model_orchestrator=LocalUserOrchestrator, ) # Manage state - nd_module.manage_state( - state=module.params["state"], - new_configs=module.params["config"], - unwanted_keys=[ - ["passwordPolicy", "passwordChangeTime"], # Nested path - ["userID"] # Simple key - ], - override_exceptions=module.params.get("override_exceptions") - ) + nd_module.manage_state() nd_module.exit_json() From 6c411bcf7661dc444f902e11959a74a0837abaf2 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Wed, 25 
Feb 2026 08:24:28 -0500 Subject: [PATCH 036/109] [ignore] Default to none and update condition for regarding in models/base.py. --- plugins/module_utils/models/base.py | 8 +++++--- plugins/module_utils/models/local_user.py | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index db7fd9ae..4ddeacd0 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -15,6 +15,7 @@ # TODO: Revisit identifiers strategy (low priority) +# TODO: add kwargs to every sub method class NDBaseModel(BaseModel, ABC): """ Base model for all Nexus Dashboard API objects. @@ -26,6 +27,7 @@ class NDBaseModel(BaseModel, ABC): - none: no identifiers required (e.g., only a single instance can exist in Nexus Dasboard) """ # TODO: revisit initial Model Configurations (low priority) + # TODO: enable extra model_config = ConfigDict( str_strip_whitespace=True, use_enum_values=True, @@ -36,7 +38,7 @@ class NDBaseModel(BaseModel, ABC): # TODO: Revisit identifiers strategy (low priority) identifiers: ClassVar[Optional[List[str]]] = None - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "none"]]] = None + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "none"]]] = "none" # Optional: fields to exclude from diffs (e.g., passwords) exclude_from_diff: ClassVar[List[str]] = [] @@ -51,7 +53,7 @@ def __init_subclass__(cls, **kwargs): # Skip enforcement for nested models # TODO: Remove if `NDNestedModel` is a separated BaseModel (low priority) - if cls.__name__ in ['NDNestedModel']: + if cls.__name__ in ["NDNestedModel"] or any(base.__name__ == "NDNestedModel" for base in cls.__mro__): return if not hasattr(cls, "identifiers") or cls.identifiers is None: @@ -146,7 +148,7 @@ def to_diff_dict(self) -> Dict[str, Any]: ) # NOTE: initialize and return a deep copy of the instance? 
- # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? -> similar to NDCOnfigCollection... + # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? -> similar to NDCOnfigCollection... -> add argument to make it optional either replace def merge(self, other_model: "NDBaseModel") -> Self: if not isinstance(other_model, type(self)): # TODO: Change error message diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index ea511097..77307d07 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -74,6 +74,7 @@ class LocalUserModel(NDBaseModel): # Fields # NOTE: `alias` are NOT the ansible aliases. they are the equivalent attribute's names from the API spec + # TODO: use extra for generating argument_spec (low priority) login_id: str = Field(alias="loginID") email: Optional[str] = Field(default=None, alias="email") first_name: Optional[str] = Field(default=None, alias="firstName") From 8ed627c9b653febfb12f86e753c23dd1cec182cd Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 26 Feb 2026 10:38:50 -0500 Subject: [PATCH 037/109] [ignore] Add choice for when no identifier is needed. 
Add quick comments and changes to models/local_user.py and api_endpoints/base.py --- plugins/module_utils/api_endpoints/base.py | 6 ++--- plugins/module_utils/models/base.py | 29 +++++++++++----------- plugins/module_utils/models/local_user.py | 6 ++--- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py index 747c3283..90ef5c87 100644 --- a/plugins/module_utils/api_endpoints/base.py +++ b/plugins/module_utils/api_endpoints/base.py @@ -35,9 +35,9 @@ def path(self) -> str: def verb(self) -> str: pass - # TODO: Maybe to be modifed to be more Pydantic - # TODO: Maybe change function's name - # NOTE: function to set endpoints attribute fields from identifiers + # TODO: Maybe to be modifed to be more Pydantic (low priority) + # TODO: Maybe change function's name (low priority) + # NOTE: function to set endpoints attribute fields from identifiers -> acts as the bridge between Models and Endpoints for API Request Orchestration @abstractmethod def set_identifiers(self, identifier: IdentifierKey = None): pass diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 4ddeacd0..159acb93 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -15,7 +15,6 @@ # TODO: Revisit identifiers strategy (low priority) -# TODO: add kwargs to every sub method class NDBaseModel(BaseModel, ABC): """ Base model for all Nexus Dashboard API objects. 
@@ -24,24 +23,24 @@ class NDBaseModel(BaseModel, ABC): - single: One unique required field (e.g., ["login_id"]) - composite: Multiple required fields as tuple (e.g., ["device", "interface"]) - hierarchical: Priority-ordered fields (e.g., ["uuid", "name"]) - - none: no identifiers required (e.g., only a single instance can exist in Nexus Dasboard) + - singleton: no identifiers required (e.g., only a single instance can exist in Nexus Dasboard) """ # TODO: revisit initial Model Configurations (low priority) - # TODO: enable extra model_config = ConfigDict( str_strip_whitespace=True, use_enum_values=True, validate_assignment=True, populate_by_name=True, - extra='ignore' + extra='allow', # NOTE: enabled extra: allows to add extra Field infos for generating Ansible argument_spec and Module Docs ) # TODO: Revisit identifiers strategy (low priority) identifiers: ClassVar[Optional[List[str]]] = None - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "none"]]] = "none" + # TODO: Rvisit no identifiers strategy naming (`singleton`) (low priority) + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" # Optional: fields to exclude from diffs (e.g., passwords) - exclude_from_diff: ClassVar[List[str]] = [] + exclude_from_diff: ClassVar[List] = [] unwanted_keys: ClassVar[List] = [] # TODO: Revisit it with identifiers strategy (low priority) @@ -52,7 +51,7 @@ def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) # Skip enforcement for nested models - # TODO: Remove if `NDNestedModel` is a separated BaseModel (low priority) + # TODO: Remove if `NDNestedModel` is a separated BaseModel (low conditional priority) if cls.__name__ in ["NDNestedModel"] or any(base.__name__ == "NDNestedModel" for base in cls.__mro__): return @@ -64,11 +63,10 @@ def __init_subclass__(cls, **kwargs): if not hasattr(cls, "identifier_strategy") or cls.identifier_strategy is None: raise 
ValueError( f"Class {cls.__name__} must define 'identifiers' and 'identifier_strategy'." - f"Example: `identifier_strategy: ClassVar[Optional[Literal['single', 'composite', 'hierarchical', 'none']]] = 'single'`" + f"Example: `identifier_strategy: ClassVar[Optional[Literal['single', 'composite', 'hierarchical', 'singleton']]] = 'single'`" ) # NOTE: Might not need to make them absractmethod because of the Pydantic built-in methods (low priority) - # NOTE: Should we use keyword arguments? @abstractmethod def to_payload(self, **kwargs) -> Dict[str, Any]: """ @@ -85,16 +83,15 @@ def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: pass # TODO: Revisit this function when revisiting identifier strategy (low priority) - # TODO: Add condition when there is no identifiers (high priority) - def get_identifier_value(self) -> Union[str, int, Tuple[Any, ...]]: + def get_identifier_value(self, **kwargs) -> Union[str, int, Tuple[Any, ...]]: """ Extract identifier value(s) from this instance: - single identifier: Returns field value. - composite identifiers: Returns tuple of all field values. - hierarchical identifiers: Returns tuple of (field_name, value) for first non-None field. 
""" - if not self.identifiers: - raise ValueError(f"{self.__class__.__name__} has no identifiers defined") + if not self.identifiers and self.identifier_strategy != "singleton": + raise ValueError(f"{self.__class__.__name__} must have identifiers defined with its current identifier strategy: `{self.identifier_strategy}`") if self.identifier_strategy == "single": value = getattr(self, self.identifiers[0], None) @@ -133,6 +130,10 @@ def get_identifier_value(self) -> Union[str, int, Tuple[Any, ...]]: f"No non-None value in hierarchical fields {self.identifiers}" ) + # TODO: Revisit condition when there is no identifiers (low priority) + elif self.identifier_strategy == "singleton": + return self.identifier_strategy + else: raise ValueError(f"Unknown identifier strategy: {self.identifier_strategy}") @@ -166,7 +167,7 @@ def merge(self, other_model: "NDBaseModel") -> Self: setattr(self, field, value) return self -# TODO: Make it a seperated BaseModel (low priority) +# TODO: Make it a seperated BaseModel? (low conditional priority) class NDNestedModel(NDBaseModel): """ Base for nested models without identifiers. diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 77307d07..ed09666d 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -16,7 +16,7 @@ # TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel, NDNestedModel from .base import NDBaseModel, NDNestedModel -# TODO: Move it to constants.py and make a reverse class Map for this +# TODO: Move it to constants.py and make a reverse class Map for this (low priority) USER_ROLES_MAPPING = MappingProxyType({ "fabric_admin": "fabric-admin", "observer": "observer", @@ -51,7 +51,7 @@ def serialize_model(self) -> Dict: # NOTE: Not needed as it already defined in `LocalUserModel` -> investigate if needed -# TODO: Add field validation (e.g. me, le, choices, etc...) 
(medium priority) +# TODO: Add field validation (e.g. me, le, choices, etc...) (low priority) class LocalUserModel(NDBaseModel): """ Local user configuration. @@ -62,7 +62,7 @@ class LocalUserModel(NDBaseModel): # Identifier configuration # TODO: Revisit this identifiers strategy (low priority) identifiers: ClassVar[Optional[List[str]]] = ["login_id"] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "none"]]] = "single" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" # Keys management configurations # TODO: Revisit these configurations (low priority) From 1ddd995e5b42018d920fa97825b0eec17b9d6afb Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 26 Feb 2026 10:42:12 -0500 Subject: [PATCH 038/109] [ignore] Complete orchestrators/base.py by making simple CRUD operations methods that work for single_identifier strategy (meant to be overridden if needed). --- plugins/module_utils/orchestrators/base.py | 48 ++++++++++--------- .../module_utils/orchestrators/local_user.py | 9 ++-- 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index e2d9fa75..611f39a6 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -24,61 +24,63 @@ class NDBaseOrchestrator(BaseModel): model_class: ClassVar[Type[NDBaseModel]] = Type[NDBaseModel] # NOTE: if not defined by subclasses, return an error as they are required - # TODO: change name from http method to crud (e.g. 
post -> create) - post_endpoint: Type[NDBaseSmartEndpoint] - put_endpoint: Type[NDBaseSmartEndpoint] + create_endpoint: Type[NDBaseSmartEndpoint] + update_endpoint: Type[NDBaseSmartEndpoint] delete_endpoint: Type[NDBaseSmartEndpoint] - get_endpoint: Type[NDBaseSmartEndpoint] + query_endpoint: Type[NDBaseSmartEndpoint] # NOTE: Module Field is always required - # TODO: Replace it with future sender + # TODO: Replace it with future sender (low priority) module: NDModule # NOTE: Generic CRUD API operations for simple endpoints with single identifier (e.g. "api/v1/infra/aaa/LocalUsers/{loginID}") - # TODO: Explore new ways to make them even more general + # TODO: Explore new ways to make them even more general -> e.g., create a general API operation function (low priority) # TODO: Revisit Deserialization - def create(self, model_instance: NDBaseModel) -> ResponseType: + def create(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: if self.module.check_mode: return model_instance.to_payload() try: - api_endpoint = self.post_endpoint() + api_endpoint = self.create_endpoint() return self.module.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) except Exception as e: raise Exception(f"Create failed for {model_instance.get_identifier_value()}: {e}") from e - # TODO: Make the same changes as create() with local api_endpoint variable - def update(self, model_instance: NDBaseModel) -> ResponseType: + def update(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: if self.module.check_mode: return model_instance.to_payload() try: - self.put_endpoint.set_identifiers(model_instance.get_identifier_value()) - return self.module.request(path=self.put_endpoint.path, method=self.put_endpoint.verb, data=model_instance.to_payload()) + api_endpoint = self.update_endpoint() + api_endpoint.set_identifiers(model_instance.get_identifier_value()) + return self.module.request(path=api_endpoint.path, method=api_endpoint.verb, 
data=model_instance.to_payload()) except Exception as e: - raise Exception(f"Update failed for {self.current_identifier}: {e}") from e + raise Exception(f"Update failed for {model_instance.get_identifier_value()}: {e}") from e - def delete(self, model_instance: NDBaseModel) -> ResponseType: + def delete(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: if self.module.check_mode: return model_instance.to_payload() try: - self.delete_endpoint.set_identifiers(model_instance.get_identifier_value()) - return self.module.request(path=self.delete_endpoint.path, method=self.delete_endpoint.verb) + api_endpoint = self.delete_endpoint() + api_endpoint.set_identifiers(model_instance.get_identifier_value()) + return self.module.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) except Exception as e: - raise Exception(f"Delete failed for {self.current_identifier}: {e}") from e + raise Exception(f"Delete failed for {model_instance.get_identifier_value()}: {e}") from e - def query_one(self, model_instance: NDBaseModel) -> ResponseType: + def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: try: - self.get_endpoint.set_identifiers(model_instance.get_identifier_value()) - return self.module.request(path=self.get_endpoint.path, method=self.get_endpoint.verb) + api_endpoint = self.query_endpoint() + api_endpoint.set_identifiers(model_instance.get_identifier_value()) + self.query_endpoint.set_identifiers(model_instance.get_identifier_value()) + return self.module.request(path=api_endpoint.path, method=api_endpoint.verb) except Exception as e: - raise Exception(f"Query failed for {self.current_identifier}: {e}") from e + raise Exception(f"Query failed for {model_instance.get_identifier_value()}: {e}") from e # TODO: Revisit the straegy around the query_all (see local_user's case) def query_all(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: try: - result = 
self.module.query_obj(self.get_endpoint.path) + result = self.module.query_obj(self.query_endpoint.path) return result or [] except Exception as e: - raise Exception(f"Query all failed: {e}") from e \ No newline at end of file + raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index 3810fa83..caacc5aa 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -27,18 +27,17 @@ class LocalUserOrchestrator(NDBaseOrchestrator): model_class: Type[NDBaseModel] = LocalUserModel - post_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPost - put_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPut + create_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPost + update_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPut delete_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersDelete - get_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet + query_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet def query_all(self): """ Custom query_all action to extract 'localusers' from response. """ try: - result = self.module.query_obj(self.get_endpoint.base_path) + result = self.module.query_obj(self.query_endpoint.base_path) return result.get("localusers", []) or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e - \ No newline at end of file From aa99bbf9214c65accde6d9b996e27a4a7c6d87ea Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 26 Feb 2026 10:44:23 -0500 Subject: [PATCH 039/109] [ignore] Fix and in nd_config_collections.py. Move to utils.py. 
--- plugins/module_utils/nd_config_collection.py | 42 +++----------------- plugins/module_utils/utils.py | 29 +++++++++++++- 2 files changed, 34 insertions(+), 37 deletions(-) diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index a25287aa..fa6662c9 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -18,10 +18,10 @@ # Type aliases # NOTE: Maybe add more type aliases in the future if needed ModelType = TypeVar('ModelType', bound=NDBaseModel) -# TODO: Defined the same acros multiple files -> maybe move to constants.py +# TODO: Defined the same acros multiple files -> maybe move to constants.py (low priority) IdentifierKey = Union[str, int, Tuple[Any, ...]] -# TODO:Might make it a Pydantic RootModel (low priority but medium impact on NDNetworkResourceModule) +# TODO: Make it a Pydantic RootModel? (low conditional priority but medium impact on NDNetworkResourceModule) class NDConfigCollection(Generic[ModelType]): """ Nexus Dashboard configuration collection for NDBaseModel instances. @@ -59,7 +59,7 @@ def _rebuild_index(self) -> None: key = self._extract_key(item) self._index[key] = index - # Core CRUD Operations + # Core Operations def add(self, item: ModelType) -> IdentifierKey: """ @@ -142,7 +142,7 @@ def delete(self, key: IdentifierKey) -> bool: # Diff Operations # NOTE: Maybe add a similar one in the NDBaseModel (-> but is it necessary?) - def get_diff_config(self, new_item: ModelType, unwanted_keys: Optional[List[Union[str, List[str]]]] = None) -> Literal["new", "no_diff", "changed"]: + def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "changed"]: """ Compare single item against collection. 
""" @@ -158,16 +158,12 @@ def get_diff_config(self, new_item: ModelType, unwanted_keys: Optional[List[Unio existing_data = existing.to_diff_dict() new_data = new_item.to_diff_dict() - - if unwanted_keys: - existing_data = self._remove_unwanted_keys(existing_data, unwanted_keys) - new_data = self._remove_unwanted_keys(new_data, unwanted_keys) is_subset = issubset(new_data, existing_data) return "no_diff" if is_subset else "changed" - def get_diff_collection(self, other: "NDConfigCollection[ModelType]", unwanted_keys: Optional[List[Union[str, List[str]]]] = None) -> bool: + def get_diff_collection(self, other: "NDConfigCollection[ModelType]") -> bool: """ Check if two collections differ. """ @@ -178,7 +174,7 @@ def get_diff_collection(self, other: "NDConfigCollection[ModelType]", unwanted_k return True for item in other: - if self.get_diff_config(item, unwanted_keys) != "no_diff": + if self.get_diff_config(item) != "no_diff": return True for key in self.keys(): @@ -195,32 +191,6 @@ def get_diff_identifiers(self, other: "NDConfigCollection[ModelType]") -> List[I other_keys = set(other.keys()) return list(current_keys - other_keys) - # TODO: Maybe not necessary - def _remove_unwanted_keys(self, data: Dict, unwanted_keys: List[Union[str, List[str]]]) -> Dict: - """Remove unwanted keys from dict (supports nested paths).""" - data = deepcopy(data) - - for key in unwanted_keys: - if isinstance(key, str): - if key in data: - del data[key] - - elif isinstance(key, list) and len(key) > 0: - try: - parent = data - for k in key[:-1]: - if isinstance(parent, dict) and k in parent: - parent = parent[k] - else: - break - else: - if isinstance(parent, dict) and key[-1] in parent: - del parent[key[-1]] - except (KeyError, TypeError, IndexError): - pass - - return data - # Collection Operations def __len__(self) -> int: diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 72ccbcd7..a7c1d3dc 100644 --- a/plugins/module_utils/utils.py +++ 
b/plugins/module_utils/utils.py @@ -9,7 +9,7 @@ __metaclass__ = type from copy import deepcopy -from typing import Any +from typing import Any, Dict, List, Union def sanitize_dict(dict_to_sanitize, keys=None, values=None, recursive=True, remove_none_values=True): @@ -54,3 +54,30 @@ def issubset(subset: Any, superset: Any) -> bool: return False return True + + +# TODO: Might not necessary with Pydantic validation and serialization built-in methods +def remove_unwanted_keys(data: Dict, unwanted_keys: List[Union[str, List[str]]]) -> Dict: + """Remove unwanted keys from dict (supports nested paths).""" + data = deepcopy(data) + + for key in unwanted_keys: + if isinstance(key, str): + if key in data: + del data[key] + + elif isinstance(key, list) and len(key) > 0: + try: + parent = data + for k in key[:-1]: + if isinstance(parent, dict) and k in parent: + parent = parent[k] + else: + break + else: + if isinstance(parent, dict) and key[-1] in parent: + del parent[key[-1]] + except (KeyError, TypeError, IndexError): + pass + + return data From 04c73ff73cc3ea62a186967f1e69f747050a020d Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 26 Feb 2026 14:01:24 -0500 Subject: [PATCH 040/109] [ignore] Rename NDNetworkResourceModule to NDStateMachine. Add file for NDNestedModel. Add types.file. Various Renaming and small Modifications across the repo. WIP. 
--- plugins/module_utils/api_endpoints/base.py | 2 +- .../module_utils/api_endpoints/local_user.py | 3 +- plugins/module_utils/constants.py | 21 ++++--- plugins/module_utils/models/base.py | 58 +++++++------------ plugins/module_utils/models/local_user.py | 28 ++++----- plugins/module_utils/models/nested.py | 22 +++++++ plugins/module_utils/nd.py | 5 -- plugins/module_utils/nd_config_collection.py | 28 +++++---- ...twork_resources.py => nd_state_machine.py} | 23 ++++---- plugins/module_utils/orchestrators/base.py | 8 +-- .../module_utils/orchestrators/local_user.py | 5 +- plugins/module_utils/types.py | 14 +++++ plugins/modules/nd_local_user.py | 7 +-- 13 files changed, 115 insertions(+), 109 deletions(-) create mode 100644 plugins/module_utils/models/nested.py rename plugins/module_utils/{nd_network_resources.py => nd_state_machine.py} (95%) create mode 100644 plugins/module_utils/types.py diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py index 90ef5c87..0355a1de 100644 --- a/plugins/module_utils/api_endpoints/base.py +++ b/plugins/module_utils/api_endpoints/base.py @@ -12,8 +12,8 @@ from abc import ABC, abstractmethod from pydantic import BaseModel, ConfigDict from typing import Final, Union, Tuple, Any +from ..types import IdentifierKey -IdentifierKey = Union[str, int, Tuple[Any, ...], None] # TODO: Rename it to APIEndpoint # NOTE: This is a very minimalist endpoint package -> needs to be enhanced diff --git a/plugins/module_utils/api_endpoints/local_user.py b/plugins/module_utils/api_endpoints/local_user.py index 61f52ad8..666782ab 100644 --- a/plugins/module_utils/api_endpoints/local_user.py +++ b/plugins/module_utils/api_endpoints/local_user.py @@ -20,8 +20,7 @@ from enums import VerbEnum from base import NDBaseSmartEndpoint, NDBasePath from pydantic import Field - -IdentifierKey = Union[str, int, Tuple[Any, ...], None] +from ..types import IdentifierKey class _EpApiV1InfraAaaLocalUsersBase(LoginIdMixin, 
NDBaseSmartEndpoint): """ diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index cbba61b3..7bb7e95d 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -9,6 +9,18 @@ __metaclass__ = type +from typing import Dict +from types import MappingProxyType +from copy import deepcopy + +class NDConstantMapping(Dict): + + def __init__(self, data: Dict): + new_dict = deepcopy(data) + for k,v in data.items(): + new_dict[v] = k + return MappingProxyType(new_dict) + OBJECT_TYPES = { "tenant": "OST_TENANT", "vrf": "OST_VRF", @@ -175,12 +187,3 @@ ND_SETUP_NODE_DEPLOYMENT_TYPE = {"physical": "cimc", "virtual": "vnode"} BACKUP_TYPE = {"config_only": "config-only", None: "config-only", "": "config-only", "full": "full"} - -USER_ROLES_MAPPING = { - "fabric_admin": "fabric-admin", - "observer": "observer", - "super_admin": "super-admin", - "support_engineer": "support-engineer", - "approver": "approver", - "designer": "designer", -} diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 159acb93..ca672fd5 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -15,6 +15,7 @@ # TODO: Revisit identifiers strategy (low priority) +# NOTE: what about List of NestedModels? -> make it a separate Sub Model class NDBaseModel(BaseModel, ABC): """ Base model for all Nexus Dashboard API objects. 
@@ -36,11 +37,12 @@ class NDBaseModel(BaseModel, ABC): # TODO: Revisit identifiers strategy (low priority) identifiers: ClassVar[Optional[List[str]]] = None - # TODO: Rvisit no identifiers strategy naming (`singleton`) (low priority) + # TODO: Revisit no identifiers strategy naming (`singleton` -> `unique`, `unnamed`) (low priority) identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" # Optional: fields to exclude from diffs (e.g., passwords) exclude_from_diff: ClassVar[List] = [] + # TODO: To be removed in the future (see local_user model) unwanted_keys: ClassVar[List] = [] # TODO: Revisit it with identifiers strategy (low priority) @@ -51,7 +53,6 @@ def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) # Skip enforcement for nested models - # TODO: Remove if `NDNestedModel` is a separated BaseModel (low conditional priority) if cls.__name__ in ["NDNestedModel"] or any(base.__name__ == "NDNestedModel" for base in cls.__mro__): return @@ -65,22 +66,26 @@ def __init_subclass__(cls, **kwargs): f"Class {cls.__name__} must define 'identifiers' and 'identifier_strategy'." f"Example: `identifier_strategy: ClassVar[Optional[Literal['single', 'composite', 'hierarchical', 'singleton']]] = 'single'`" ) - - # NOTE: Might not need to make them absractmethod because of the Pydantic built-in methods (low priority) - @abstractmethod + def to_payload(self, **kwargs) -> Dict[str, Any]: """ Convert model to API payload format. """ - pass + return self.model_dump(by_alias=True, exclude_none=True, **kwargs) - @classmethod - @abstractmethod - def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: + def to_config(self, **kwargs) -> Dict[str, Any]: """ - Create model instance from API response. + Convert model to Ansible config format. 
""" - pass + return self.model_dump(by_name=True, exclude_none=True, **kwargs) + + @classmethod + def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: + return cls.model_validate(response, by_alias=True, **kwargs) + + @classmethod + def from_config(cls, ansible_config: Dict[str, Any], **kwargs) -> Self: + return cls.model_validate(ansible_config, by_name=True, **kwargs) # TODO: Revisit this function when revisiting identifier strategy (low priority) def get_identifier_value(self, **kwargs) -> Union[str, int, Tuple[Any, ...]]: @@ -132,25 +137,26 @@ def get_identifier_value(self, **kwargs) -> Union[str, int, Tuple[Any, ...]]: # TODO: Revisit condition when there is no identifiers (low priority) elif self.identifier_strategy == "singleton": - return self.identifier_strategy + return None else: raise ValueError(f"Unknown identifier strategy: {self.identifier_strategy}") - def to_diff_dict(self) -> Dict[str, Any]: + def to_diff_dict(self, **kwargs) -> Dict[str, Any]: """ Export for diff comparison (excludes sensitive fields). """ return self.model_dump( by_alias=True, exclude_none=True, - exclude=set(self.exclude_from_diff) + exclude=set(self.exclude_from_diff), + **kwargs ) # NOTE: initialize and return a deep copy of the instance? # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? -> similar to NDCOnfigCollection... -> add argument to make it optional either replace - def merge(self, other_model: "NDBaseModel") -> Self: + def merge(self, other_model: "NDBaseModel", **kwargs) -> Self: if not isinstance(other_model, type(self)): # TODO: Change error message return TypeError("models are not of the same type.") @@ -166,25 +172,3 @@ def merge(self, other_model: "NDBaseModel") -> Self: else: setattr(self, field, value) return self - -# TODO: Make it a seperated BaseModel? (low conditional priority) -class NDNestedModel(NDBaseModel): - """ - Base for nested models without identifiers. 
- """ - - # TODO: Configuration Fields to be clearly defined here (low priority) - identifiers: ClassVar[List[str]] = [] - - def to_payload(self) -> Dict[str, Any]: - """ - Convert model to API payload format. - """ - return self.model_dump(by_alias=True, exclude_none=True) - - @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: - """ - Create model instance from API response. - """ - return cls.model_validate(response, by_alias=True) diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index ed09666d..dba35aee 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -13,11 +13,14 @@ from typing import List, Dict, Any, Optional, ClassVar, Literal from typing_extensions import Self -# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel, NDNestedModel -from .base import NDBaseModel, NDNestedModel - -# TODO: Move it to constants.py and make a reverse class Map for this (low priority) -USER_ROLES_MAPPING = MappingProxyType({ +# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from .base import NDBaseModel +from .nested import NDNestedModel +from ..constants import NDConstantMapping + +# Constant defined here as it is only used in this model +USER_ROLES_MAPPING = NDConstantMapping({ "fabric_admin": "fabric-admin", "observer": "observer", "super_admin": "super-admin", @@ -31,7 +34,7 @@ class LocalUserSecurityDomainModel(NDNestedModel): """Security domain configuration for local user (nested model).""" # Fields - name: str = Field(..., alias="name", exclude=True) + name: str = Field(alias="name", exclude=True) roles: Optional[List[str]] = Field(default=None, alias="roles", exclude=True) # -- Serialization (Model 
instance -> API payload) -- @@ -47,8 +50,7 @@ def serialize_model(self) -> Dict: } } - # -- Deserialization (API response / Ansible payload -> Model instance) -- - # NOTE: Not needed as it already defined in `LocalUserModel` -> investigate if needed + # NOTE: Deserialization defined in `LocalUserModel` due to API response complexity # TODO: Add field validation (e.g. me, le, choices, etc...) (low priority) @@ -121,10 +123,6 @@ def serialize_domains(self, value: Optional[List[LocalUserSecurityDomainModel]]) "domains": domains_dict } - - def to_payload(self, **kwargs) -> Dict[str, Any]: - return self.model_dump(by_alias=True, exclude_none=True, **kwargs) - # -- Deserialization (API response / Ansible payload -> Model instance) -- @model_validator(mode="before") @@ -172,12 +170,6 @@ def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: return value - # TODO: only works for api responses but NOT for Ansible configs -> needs to be fixed (high priority) - @classmethod - def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: - return cls.model_validate(response, by_alias=True, **kwargs) - - # -- Extra -- # TODO: to generate from Fields (low priority) diff --git a/plugins/module_utils/models/nested.py b/plugins/module_utils/models/nested.py new file mode 100644 index 00000000..f2560819 --- /dev/null +++ b/plugins/module_utils/models/nested.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import List, ClassVar +from .base import NDBaseModel + + +class NDNestedModel(NDBaseModel): + """ + Base for nested models without identifiers. 
+ """ + + # NOTE: model_config, ClassVar, and Fields can be overwritten here if needed + + identifiers: ClassVar[List[str]] = [] diff --git a/plugins/module_utils/nd.py b/plugins/module_utils/nd.py index 5f528bb8..07af68e5 100644 --- a/plugins/module_utils/nd.py +++ b/plugins/module_utils/nd.py @@ -239,13 +239,8 @@ def request( if file is not None: info = self.connection.send_file_request(method, uri, file, data, None, file_key, file_ext) else: -<<<<<<< HEAD if data: info = self.connection.send_request(method, uri, json.dumps(data)) -======= - if data is not None: - info = conn.send_request(method, uri, json.dumps(data)) ->>>>>>> 7c967c3 ([minor_change] Add nd_local_user as a new network resource module for Nexus Dashboard v4.1.0 and higher.) else: info = self.connection.send_request(method, uri) self.result["data"] = data diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index fa6662c9..364519b8 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -14,14 +14,12 @@ # TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from .models.base import NDBaseModel from .utils import issubset +from .types import IdentifierKey # Type aliases -# NOTE: Maybe add more type aliases in the future if needed ModelType = TypeVar('ModelType', bound=NDBaseModel) -# TODO: Defined the same acros multiple files -> maybe move to constants.py (low priority) -IdentifierKey = Union[str, int, Tuple[Any, ...]] -# TODO: Make it a Pydantic RootModel? (low conditional priority but medium impact on NDNetworkResourceModule) + class NDConfigCollection(Generic[ModelType]): """ Nexus Dashboard configuration collection for NDBaseModel instances. 
@@ -156,9 +154,9 @@ def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "cha if existing is None: return "new" + # TODO: make a diff class level method for NDBaseModel existing_data = existing.to_diff_dict() new_data = new_item.to_diff_dict() - is_subset = issubset(new_data, existing_data) return "no_diff" if is_subset else "changed" @@ -214,30 +212,30 @@ def copy(self) -> "NDConfigCollection[ModelType]": # Collection Serialization - def to_list(self, **kwargs) -> List[Dict]: + def to_ansible_config(self, **kwargs) -> List[Dict]: """ - Export as list of dicts (with aliases). + Export as an Ansible config. """ - return [item.model_dump(by_alias=True, exclude_none=True, **kwargs) for item in self._items] + return [item.to_config(**kwargs) for item in self._items] - def to_payload_list(self) -> List[Dict[str, Any]]: + def to_payload_list(self, **kwargs) -> List[Dict[str, Any]]: """ Export as list of API payloads. """ - return [item.to_payload() for item in self._items] + return [item.to_payload(**kwargs) for item in self._items] @classmethod - def from_list(cls, data: List[Dict], model_class: type[ModelType]) -> "NDConfigCollection[ModelType]": + def from_ansible_config(cls, data: List[Dict], model_class: type[ModelType], **kwargs) -> "NDConfigCollection[ModelType]": """ - Create collection from list of dicts. + Create collection from Ansible config. """ - items = [model_class.model_validate(item_data, by_name=True) for item_data in data] + items = [model_class.from_config(item_data, **kwargs) for item_data in data] return cls(model_class=model_class, items=items) @classmethod - def from_api_response(cls, response_data: List[Dict[str, Any]], model_class: type[ModelType]) -> "NDConfigCollection[ModelType]": + def from_api_response(cls, response_data: List[Dict[str, Any]], model_class: type[ModelType], **kwargs) -> "NDConfigCollection[ModelType]": """ Create collection from API response. 
""" - items = [model_class.from_response(item_data) for item_data in response_data] + items = [model_class.from_response(item_data, **kwargs) for item_data in response_data] return cls(model_class=model_class, items=items) diff --git a/plugins/module_utils/nd_network_resources.py b/plugins/module_utils/nd_state_machine.py similarity index 95% rename from plugins/module_utils/nd_network_resources.py rename to plugins/module_utils/nd_state_machine.py index d52fb9de..5306bfe8 100644 --- a/plugins/module_utils/nd_network_resources.py +++ b/plugins/module_utils/nd_state_machine.py @@ -24,26 +24,24 @@ from .orchestrators.base import NDBaseOrchestrator from constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED -# TODO: replace path and verbs with smart Endpoint (Top priority) -# TODO: Rename it (low priority) + # TODO: Revisit Deserialization in every method (high priority) -class NDNetworkResourceModule(NDModule): +class NDStateMachine(NDModule): """ Generic Network Resource Module for Nexus Dashboard. """ - def __init__(self, module: AnsibleModule, model_class: Type[NDBaseModel], model_orchestrator: Type[NDBaseOrchestrator]): + def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchestrator]): """ Initialize the Network Resource Module. """ # TODO: Revisit Module initialization and configuration (medium priority). e.g., use instead: # nd_module = NDModule() super().__init__(module) - + # Configuration - # TODO: make sure `model_class` is the same as the one in `model_orchestrator`. 
if not, error out (high priority) - self.model_class = model_class self.model_orchestrator = model_orchestrator(module=module) + self.model_class = self.model_orchestrator.model_class # TODO: Revisit these class variables when udpating Module intialization and configuration (medium priority) self.state = self.params["state"] self.ansible_config = self.params["config"] @@ -52,17 +50,17 @@ def __init__(self, module: AnsibleModule, model_class: Type[NDBaseModel], model_ # Initialize collections # TODO: Revisit collections initialization especially `init_all_data` (medium priority) # TODO: Revisit class variables `previous`, `existing`, etc... (medium priority) - self.nd_config_collection = NDConfigCollection[model_class] + self.nd_config_collection = NDConfigCollection[self.model_class] try: init_all_data = self.model_orchestrator.query_all() self.existing = self.nd_config_collection.from_api_response( response_data=init_all_data, - model_class=model_class + model_class=self.model_class ) - self.previous = self.nd_config_collection(model_class=model_class) - self.proposed = self.nd_config_collection(model_class=model_class) - self.sent = self.nd_config_collection(model_class=model_class) + self.previous = self.nd_config_collection(model_class=self.model_class) + self.proposed = self.nd_config_collection(model_class=self.model_class) + self.sent = self.nd_config_collection(model_class=self.model_class) except Exception as e: self.fail_json( @@ -340,6 +338,7 @@ def _manage_delete_state(self) -> None: # Output Formatting # TODO: move to separate Class (results) -> align it with rest_send PR + # TODO: return a defined ordered list of config (for integration test) def add_logs_and_outputs(self) -> None: """Add logs and outputs to module result based on output_level.""" output_level = self.params.get("output_level", "normal") diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 611f39a6..db72b740 100644 --- 
a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -27,7 +27,8 @@ class NDBaseOrchestrator(BaseModel): create_endpoint: Type[NDBaseSmartEndpoint] update_endpoint: Type[NDBaseSmartEndpoint] delete_endpoint: Type[NDBaseSmartEndpoint] - query_endpoint: Type[NDBaseSmartEndpoint] + query_one_endpoint: Type[NDBaseSmartEndpoint] + query_all_endpoint: Type[NDBaseSmartEndpoint] # NOTE: Module Field is always required # TODO: Replace it with future sender (low priority) @@ -70,9 +71,8 @@ def delete(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: try: - api_endpoint = self.query_endpoint() + api_endpoint = self.query_one_endpoint() api_endpoint.set_identifiers(model_instance.get_identifier_value()) - self.query_endpoint.set_identifiers(model_instance.get_identifier_value()) return self.module.request(path=api_endpoint.path, method=api_endpoint.verb) except Exception as e: raise Exception(f"Query failed for {model_instance.get_identifier_value()}: {e}") from e @@ -80,7 +80,7 @@ def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: # TODO: Revisit the straegy around the query_all (see local_user's case) def query_all(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: try: - result = self.module.query_obj(self.query_endpoint.path) + result = self.module.query_obj(self.query_all_endpoint.path) return result or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index caacc5aa..ef2aa36a 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -30,14 +30,15 @@ class LocalUserOrchestrator(NDBaseOrchestrator): create_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPost update_endpoint: 
Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPut delete_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersDelete - query_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet + query_one_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet + query_all_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet def query_all(self): """ Custom query_all action to extract 'localusers' from response. """ try: - result = self.module.query_obj(self.query_endpoint.base_path) + result = self.module.query_obj(self.query_all_endpoint.base_path) return result.get("localusers", []) or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/types.py b/plugins/module_utils/types.py new file mode 100644 index 00000000..124aedd5 --- /dev/null +++ b/plugins/module_utils/types.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Union, Tuple + + +IdentifierKey = Union[str, int, Tuple[Any, ...]] diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 901549fb..67fb3e80 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -181,7 +181,7 @@ # from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel # from ansible_collections.cisco.nd.plugins.module_utils.constants import USER_ROLES_MAPPING from ..module_utils.nd import nd_argument_spec -from ..module_utils.nd_network_resources import NDNetworkResourceModule +from ..module_utils.nd_state_machine import NDStateMachine from ..module_utils.models.local_user import LocalUserModel from ..module_utils.orchestrators.local_user import LocalUserOrchestrator @@ -194,12 +194,11 @@ def 
main(): argument_spec=argument_spec, supports_check_mode=True, ) - + try: # Create NDNetworkResourceModule with LocalUserModel - nd_module = NDNetworkResourceModule( + nd_module = NDStateMachine( module=module, - model_class=LocalUserModel, model_orchestrator=LocalUserOrchestrator, ) From 85c36e8f10b4840f3a0f2faf87bf2acec3e7bb6b Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 26 Feb 2026 14:09:18 -0500 Subject: [PATCH 041/109] [ignore] Make a small change to NDModule request function. --- plugins/module_utils/nd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/module_utils/nd.py b/plugins/module_utils/nd.py index 07af68e5..42b1b118 100644 --- a/plugins/module_utils/nd.py +++ b/plugins/module_utils/nd.py @@ -239,7 +239,7 @@ def request( if file is not None: info = self.connection.send_file_request(method, uri, file, data, None, file_key, file_ext) else: - if data: + if data is not None: info = self.connection.send_request(method, uri, json.dumps(data)) else: info = self.connection.send_request(method, uri) From 034b49f6e67656be64c439b8c59b187a727fcfc2 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Mon, 2 Mar 2026 17:59:17 -0500 Subject: [PATCH 042/109] [ignore] Modify nd_state_machine to work with orchestrators/models/api_endpoints. Adapt api_endpoints, models, orchestrators accordingly. Integration Tests passing for nd_local_user module. Still WIP. 
--- plugins/module_utils/api_endpoints/base.py | 6 +- .../module_utils/api_endpoints/local_user.py | 6 +- plugins/module_utils/constants.py | 9 +- plugins/module_utils/models/base.py | 3 +- plugins/module_utils/models/local_user.py | 5 +- plugins/module_utils/nd_state_machine.py | 237 ++++++++---------- plugins/module_utils/orchestrators/base.py | 34 ++- .../module_utils/orchestrators/local_user.py | 2 +- plugins/modules/nd_local_user.py | 4 +- requirements.txt | 3 +- .../network-integration.requirements.txt | 3 +- 11 files changed, 140 insertions(+), 172 deletions(-) diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py index 0355a1de..832476ed 100644 --- a/plugins/module_utils/api_endpoints/base.py +++ b/plugins/module_utils/api_endpoints/base.py @@ -25,13 +25,13 @@ class NDBaseSmartEndpoint(BaseModel, ABC): # TODO: to remove base_path: str - @abstractmethod @property + @abstractmethod def path(self) -> str: pass - - @abstractmethod + @property + @abstractmethod def verb(self) -> str: pass diff --git a/plugins/module_utils/api_endpoints/local_user.py b/plugins/module_utils/api_endpoints/local_user.py index 666782ab..cae1326b 100644 --- a/plugins/module_utils/api_endpoints/local_user.py +++ b/plugins/module_utils/api_endpoints/local_user.py @@ -16,9 +16,9 @@ __metaclass__ = type # pylint: disable=invalid-name from typing import Literal, Union, Tuple, Any, Final -from mixins import LoginIdMixin -from enums import VerbEnum -from base import NDBaseSmartEndpoint, NDBasePath +from .mixins import LoginIdMixin +from .enums import VerbEnum +from .base import NDBaseSmartEndpoint, NDBasePath from pydantic import Field from ..types import IdentifierKey diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index 7bb7e95d..784a7f51 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -16,10 +16,13 @@ class NDConstantMapping(Dict): def __init__(self, data: Dict): - 
new_dict = deepcopy(data) + self.new_dict = deepcopy(data) for k,v in data.items(): - new_dict[v] = k - return MappingProxyType(new_dict) + self.new_dict[v] = k + self.new_dict = MappingProxyType(self.new_dict) + + def get_dict(self): + return self.new_dict OBJECT_TYPES = { "tenant": "OST_TENANT", diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index ca672fd5..7b569a58 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -32,6 +32,7 @@ class NDBaseModel(BaseModel, ABC): use_enum_values=True, validate_assignment=True, populate_by_name=True, + arbitrary_types_allowed=True, extra='allow', # NOTE: enabled extra: allows to add extra Field infos for generating Ansible argument_spec and Module Docs ) @@ -77,7 +78,7 @@ def to_config(self, **kwargs) -> Dict[str, Any]: """ Convert model to Ansible config format. """ - return self.model_dump(by_name=True, exclude_none=True, **kwargs) + return self.model_dump(by_alias=False, exclude_none=True, **kwargs) @classmethod def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index dba35aee..713d6040 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -27,7 +27,7 @@ "support_engineer": "support-engineer", "approver": "approver", "designer": "designer", -}) +}).get_dict() class LocalUserSecurityDomainModel(NDNestedModel): @@ -173,7 +173,8 @@ def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: # -- Extra -- # TODO: to generate from Fields (low priority) - def get_argument_spec(self): + @classmethod + def get_argument_spec(cls) -> Dict: return dict( config=dict( type="list", diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 5306bfe8..5b1f770c 100644 --- a/plugins/module_utils/nd_state_machine.py +++ 
b/plugins/module_utils/nd_state_machine.py @@ -16,16 +16,16 @@ # TODO: To be replaced with: # from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule # from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection -# from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +# from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator +# from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey # from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED -from nd import NDModule -from nd_config_collection import NDConfigCollection -from models.base import NDBaseModel +from .nd import NDModule +from .nd_config_collection import NDConfigCollection from .orchestrators.base import NDBaseOrchestrator -from constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED +from .types import IdentifierKey +from .constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED -# TODO: Revisit Deserialization in every method (high priority) class NDStateMachine(NDModule): """ Generic Network Resource Module for Nexus Dashboard. @@ -35,16 +35,21 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest """ Initialize the Network Resource Module. """ - # TODO: Revisit Module initialization and configuration (medium priority). 
e.g., use instead: + # TODO: Revisit Module initialization and configuration # nd_module = NDModule() - super().__init__(module) + self.module = module + self.nd_module = NDModule(module) + + # Operation tracking + self.nd_logs: List[Dict[str, Any]] = [] + self.result: Dict[str, Any] = {"changed": False} # Configuration - self.model_orchestrator = model_orchestrator(module=module) + self.model_orchestrator = model_orchestrator(sender=self.nd_module) self.model_class = self.model_orchestrator.model_class # TODO: Revisit these class variables when udpating Module intialization and configuration (medium priority) - self.state = self.params["state"] - self.ansible_config = self.params["config"] + self.state = self.module.params["state"] + self.ansible_config = self.module.params.get("config", []) # Initialize collections @@ -53,46 +58,64 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest self.nd_config_collection = NDConfigCollection[self.model_class] try: init_all_data = self.model_orchestrator.query_all() - + self.existing = self.nd_config_collection.from_api_response( response_data=init_all_data, model_class=self.model_class ) - self.previous = self.nd_config_collection(model_class=self.model_class) + # Save previous state + self.previous = self.existing.copy() self.proposed = self.nd_config_collection(model_class=self.model_class) self.sent = self.nd_config_collection(model_class=self.model_class) - + + for config in self.ansible_config: + try: + # Parse config into model + item = self.model_class.from_config(config) + self.proposed.add(item) + except ValidationError as e: + self.fail_json( + msg=f"Invalid configuration: {e}", + config=config, + validation_errors=e.errors() + ) + return + except Exception as e: self.fail_json( msg=f"Initialization failed: {str(e)}", error=str(e) ) - - # Operation tracking - self.nd_logs: List[Dict[str, Any]] = [] # Logging # NOTE: format log placeholder # TODO: use a proper logger (low priority) - 
def format_log(self, identifier, status: Literal["created", "updated", "deleted", "no_change"], after_data: Optional[Dict[str, Any]] = None, sent_payload_data: Optional[Dict[str, Any]] = None) -> None: + def format_log( + self, + identifier: IdentifierKey, + operation_status: Literal["no_change", "created", "updated", "deleted"], + before: Optional[Dict[str, Any]] = None, + after: Optional[Dict[str, Any]] = None, + payload: Optional[Dict[str, Any]] = None, + ) -> None: """ Create and append a log entry. """ log_entry = { "identifier": identifier, - "status": status, - "before": deepcopy(self.existing_config), - "after": deepcopy(after_data) if after_data is not None else self.existing_config, - "sent_payload": deepcopy(sent_payload_data) if sent_payload_data is not None else {} + "operation_status": operation_status, + "before": before, + "after": after, + "payload": payload, } # Add HTTP details if not in check mode - if not self.module.check_mode and self.url is not None: + if not self.module.check_mode and self.nd_module.url is not None: log_entry.update({ - "method": self.method, - "response": self.response, - "status": self.status, - "url": self.url + "method": self.nd_module.method, + "response": self.nd_module.response, + "status": self.nd_module.status, + "url": self.nd_module.url }) self.nd_logs.append(log_entry) @@ -103,42 +126,6 @@ def manage_state(self) -> None: """ Manage state according to desired configuration. 
""" - unwanted_keys = unwanted_keys or [] - - # Parse and validate configs - # TODO: move it to init() (top priority) - # TODO: Modify it if NDConfigCollection becomes a Pydantic RootModel (low priority) - try: - parsed_items = [] - for config in self.ansible_config: - try: - # Parse config into model - item = self.model_class.model_validate(config) - parsed_items.append(item) - except ValidationError as e: - self.fail_json( - msg=f"Invalid configuration: {e}", - config=config, - validation_errors=e.errors() - ) - return - - # Create proposed collection - self.proposed = self.nd_config_collection( - model_class=self.model_class, - items=parsed_items - ) - - # Save previous state - self.previous = self.existing.copy() - - except Exception as e: - self.fail_json( - msg=f"Failed to prepare configurations: {e}", - error=str(e) - ) - return - # Execute state operations if self.state in ["merged", "replaced", "overridden"]: self._manage_create_update_state() @@ -159,18 +146,10 @@ def _manage_create_update_state(self) -> None: Handle merged/replaced/overridden states. 
""" for proposed_item in self.proposed: + # Extract identifier + identifier = proposed_item.get_identifier_value() + existing_config = self.existing.get(identifier).to_config() if self.existing.get(identifier) else {} try: - # Extract identifier - # TODO: Remove self.current_identifier, get it directly into the action functions - identifier = proposed_item.get_identifier_value() - - existing_item = self.existing.get(identifier) - self.existing_config = ( - existing_item.model_dump(by_alias=True, exclude_none=True) - if existing_item - else {} - ) - # Determine diff status diff_status = self.existing.get_diff_config(proposed_item) @@ -178,51 +157,44 @@ def _manage_create_update_state(self) -> None: if diff_status == "no_diff": self.format_log( identifier=identifier, - status="no_change", - after_data=self.existing_config + operation_status="no_change", + before=existing_config, + after=existing_config, ) continue # Prepare final config based on state - if self.state == "merged" and existing_item: + if self.state == "merged": # Merge with existing merged_item = self.existing.merge(proposed_item) final_item = merged_item else: # Replace or create - if existing_item: + if diff_status == "changed": self.existing.replace(proposed_item) else: self.existing.add(proposed_item) final_item = proposed_item - - # Convert to API payload - self.proposed_config = final_item.to_payload() - + # Execute API operation if diff_status == "changed": - response = self.model_orchestrator.update(final_item) + if not self.module.check_mode: + response = self.model_orchestrator.update(final_item) + self.sent.add(final_item) operation_status = "updated" - else: - response = self.model_orchestrator.create(final_item) + elif diff_status == "new": + if not self.module.check_mode: + response = self.model_orchestrator.create(final_item) + self.sent.add(final_item) operation_status = "created" - # Track sent payload - if not self.module.check_mode: - self.sent.add(final_item) - sent_payload = 
final_item - else: - sent_payload = None - # Log operation self.format_log( identifier=identifier, - status=operation_status, - after_data=( - response if not self.module.check_mode - else final_item.model_dump(by_alias=True, exclude_none=True) - ), - sent_payload_data=sent_payload + operation_status=operation_status, + before=existing_config, + after=self.model_class.model_validate(response).to_config() if not self.module.check_mode else final_item.to_config(), + payload=final_item.to_payload(), ) except Exception as e: @@ -230,11 +202,12 @@ def _manage_create_update_state(self) -> None: self.format_log( identifier=identifier, - status="no_change", - after_data=self.existing_config + operation_status="no_change", + before=existing_config, + after=existing_config, ) - if not self.params.get("ignore_errors", False): + if not self.module.params.get("ignore_errors", False): self.fail_json( msg=error_msg, identifier=str(identifier), @@ -243,30 +216,21 @@ def _manage_create_update_state(self) -> None: return # TODO: Refactor with orchestrator (Top priority) - def _manage_override_deletions(self, override_exceptions: List) -> None: + def _manage_override_deletions(self) -> None: """ Delete items not in proposed config (for overridden state). 
""" diff_identifiers = self.previous.get_diff_identifiers(self.proposed) for identifier in diff_identifiers: - if identifier in override_exceptions: - continue - try: - self.current_identifier = identifier - existing_item = self.existing.get(identifier) if not existing_item: continue - - self.existing_config = existing_item.model_dump( - by_alias=True, - exclude_none=True - ) - + # Execute delete - self._delete() + if not self.module.check_mode: + response = self.model_orchestrator.delete(existing_item) # Remove from collection self.existing.delete(identifier) @@ -274,8 +238,10 @@ def _manage_override_deletions(self, override_exceptions: List) -> None: # Log deletion self.format_log( identifier=identifier, - status="deleted", - after_data={} + operation_status="deleted", + before=existing_item.to_config(), + after={}, + ) except Exception as e: @@ -295,25 +261,21 @@ def _manage_delete_state(self) -> None: for proposed_item in self.proposed: try: identifier = proposed_item.get_identifier_value() - self.current_identifier = identifier existing_item = self.existing.get(identifier) if not existing_item: # Already deleted or doesn't exist self.format_log( identifier=identifier, - status="no_change", - after_data={} + operation_status="no_change", + before={}, + after={}, ) continue - self.existing_config = existing_item.model_dump( - by_alias=True, - exclude_none=True - ) - # Execute delete - self._delete() + if not self.module.check_mode: + response = self.model_orchestrator.delete(existing_item) # Remove from collection self.existing.delete(identifier) @@ -321,8 +283,9 @@ def _manage_delete_state(self) -> None: # Log deletion self.format_log( identifier=identifier, - status="deleted", - after_data={} + operation_status="deleted", + before=existing_item.to_config(), + after={}, ) except Exception as e: @@ -341,35 +304,35 @@ def _manage_delete_state(self) -> None: # TODO: return a defined ordered list of config (for integration test) def add_logs_and_outputs(self) -> 
None: """Add logs and outputs to module result based on output_level.""" - output_level = self.params.get("output_level", "normal") - state = self.params.get("state") + output_level = self.module.params.get("output_level", "normal") + state = self.module.params.get("state") # Add previous state for certain states and output levels if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: if output_level in ("debug", "info"): - self.result["previous"] = self.previous.to_list() + self.result["previous"] = self.previous.to_ansible_config() # Check if there were changes - if not self.has_modified and self.previous.get_diff_collection(self.existing): + if self.previous.get_diff_collection(self.existing): self.result["changed"] = True # Add stdout if present - if self.stdout: - self.result["stdout"] = self.stdout + if self.nd_module.stdout: + self.result["stdout"] = self.nd_module.stdout # Add debug information if output_level == "debug": self.result["nd_logs"] = self.nd_logs - if self.url is not None: - self.result["httpapi_logs"] = self.httpapi_logs + if self.nd_module.url is not None: + self.result["httpapi_logs"] = self.nd_module.httpapi_logs if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: self.result["sent"] = self.sent.to_payload_list() - self.result["proposed"] = self.proposed.to_list() + self.result["proposed"] = self.proposed.to_ansible_config() # Always include current state - self.result["current"] = self.existing.to_list() + self.result["current"] = self.existing.to_ansible_config() # Module Exit Methods diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index db72b740..924ea4b0 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -11,8 +11,8 @@ from ..models.base import NDBaseModel from ..nd import NDModule from ..api_endpoints.base import NDBaseSmartEndpoint -from typing import Dict, List, Any, Union, ClassVar, Type -from pydantic import BaseModel 
+from typing import Dict, List, Any, Union, ClassVar, Type, Optional +from pydantic import BaseModel, ConfigDict ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] @@ -21,6 +21,13 @@ # TODO: Revisit naming them "Orchestrator" class NDBaseOrchestrator(BaseModel): + model_config = ConfigDict( + use_enum_values=True, + validate_assignment=True, + populate_by_name=True, + arbitrary_types_allowed=True, + ) + model_class: ClassVar[Type[NDBaseModel]] = Type[NDBaseModel] # NOTE: if not defined by subclasses, return an error as they are required @@ -32,40 +39,31 @@ class NDBaseOrchestrator(BaseModel): # NOTE: Module Field is always required # TODO: Replace it with future sender (low priority) - module: NDModule + sender: NDModule # NOTE: Generic CRUD API operations for simple endpoints with single identifier (e.g. "api/v1/infra/aaa/LocalUsers/{loginID}") # TODO: Explore new ways to make them even more general -> e.g., create a general API operation function (low priority) # TODO: Revisit Deserialization def create(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: - if self.module.check_mode: - return model_instance.to_payload() - try: api_endpoint = self.create_endpoint() - return self.module.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) + return self.sender.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) except Exception as e: raise Exception(f"Create failed for {model_instance.get_identifier_value()}: {e}") from e def update(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: - if self.module.check_mode: - return model_instance.to_payload() - try: api_endpoint = self.update_endpoint() api_endpoint.set_identifiers(model_instance.get_identifier_value()) - return self.module.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) + return self.sender.request(path=api_endpoint.path, method=api_endpoint.verb, 
data=model_instance.to_payload()) except Exception as e: raise Exception(f"Update failed for {model_instance.get_identifier_value()}: {e}") from e def delete(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: - if self.module.check_mode: - return model_instance.to_payload() - try: api_endpoint = self.delete_endpoint() api_endpoint.set_identifiers(model_instance.get_identifier_value()) - return self.module.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) + return self.sender.request(path=api_endpoint.path, method=api_endpoint.verb) except Exception as e: raise Exception(f"Delete failed for {model_instance.get_identifier_value()}: {e}") from e @@ -73,14 +71,14 @@ def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: try: api_endpoint = self.query_one_endpoint() api_endpoint.set_identifiers(model_instance.get_identifier_value()) - return self.module.request(path=api_endpoint.path, method=api_endpoint.verb) + return self.sender.request(path=api_endpoint.path, method=api_endpoint.verb) except Exception as e: raise Exception(f"Query failed for {model_instance.get_identifier_value()}: {e}") from e # TODO: Revisit the straegy around the query_all (see local_user's case) - def query_all(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: + def query_all(self, model_instance: Optional[NDBaseModel] = None, **kwargs) -> ResponseType: try: - result = self.module.query_obj(self.query_all_endpoint.path) + result = self.sender.query_obj(self.query_all_endpoint.path) return result or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index ef2aa36a..46a4ea07 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -38,7 +38,7 @@ def query_all(self): Custom query_all action to extract 'localusers' from 
response. """ try: - result = self.module.query_obj(self.query_all_endpoint.base_path) + result = self.sender.query_obj(self.query_all_endpoint.base_path) return result.get("localusers", []) or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 67fb3e80..b6acee72 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -177,9 +177,9 @@ from ansible.module_utils.basic import AnsibleModule # TODO: To be replaced with: # from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec -# from ansible_collections.cisco.nd.plugins.module_utils.nd_network_resource_module import NDNetworkResourceModule +# from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import NDStateMachine # from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel -# from ansible_collections.cisco.nd.plugins.module_utils.constants import USER_ROLES_MAPPING +# from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.local_user import LocalUserOrchestrator from ..module_utils.nd import nd_argument_spec from ..module_utils.nd_state_machine import NDStateMachine from ..module_utils.models.local_user import LocalUserModel diff --git a/requirements.txt b/requirements.txt index 514632d1..98907e9a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ requests_toolbelt jsonpath-ng -lxml \ No newline at end of file +lxml +pydantic==2.12.5 \ No newline at end of file diff --git a/tests/integration/network-integration.requirements.txt b/tests/integration/network-integration.requirements.txt index 514632d1..98907e9a 100644 --- a/tests/integration/network-integration.requirements.txt +++ b/tests/integration/network-integration.requirements.txt @@ -1,3 +1,4 @@ requests_toolbelt jsonpath-ng -lxml \ No newline at end of file +lxml +pydantic==2.12.5 \ No newline at end of file 
From b6ddee406b72cf93f24854981db82d58f7e8f474 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 3 Mar 2026 11:46:31 -0500 Subject: [PATCH 043/109] [ignore] Add proper path dependencies and Ran black formatting. --- plugins/module_utils/api_endpoints/base.py | 5 +- plugins/module_utils/api_endpoints/enums.py | 2 +- .../module_utils/api_endpoints/local_user.py | 13 +- plugins/module_utils/api_endpoints/mixins.py | 2 +- plugins/module_utils/constants.py | 7 +- plugins/module_utils/models/base.py | 60 +++--- plugins/module_utils/models/local_user.py | 75 +++----- plugins/module_utils/models/nested.py | 2 +- plugins/module_utils/nd_config_collection.py | 94 +++++----- plugins/module_utils/nd_state_machine.py | 171 +++++++----------- plugins/module_utils/orchestrators/base.py | 9 +- .../module_utils/orchestrators/local_user.py | 12 +- plugins/module_utils/utils.py | 16 +- plugins/modules/nd_api_key.py | 1 - plugins/modules/nd_local_user.py | 25 +-- 15 files changed, 204 insertions(+), 290 deletions(-) diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py index 832476ed..954c1f6a 100644 --- a/plugins/module_utils/api_endpoints/base.py +++ b/plugins/module_utils/api_endpoints/base.py @@ -12,13 +12,12 @@ from abc import ABC, abstractmethod from pydantic import BaseModel, ConfigDict from typing import Final, Union, Tuple, Any -from ..types import IdentifierKey +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.types import IdentifierKey # TODO: Rename it to APIEndpoint # NOTE: This is a very minimalist endpoint package -> needs to be enhanced class NDBaseSmartEndpoint(BaseModel, ABC): - # TODO: maybe to be modified in the future model_config = ConfigDict(validate_assignment=True) @@ -29,7 +28,7 @@ class NDBaseSmartEndpoint(BaseModel, ABC): @abstractmethod def path(self) -> str: pass - + @property @abstractmethod def verb(self) -> str: diff --git a/plugins/module_utils/api_endpoints/enums.py 
b/plugins/module_utils/api_endpoints/enums.py index afb4dd5c..ced62ba7 100644 --- a/plugins/module_utils/api_endpoints/enums.py +++ b/plugins/module_utils/api_endpoints/enums.py @@ -43,4 +43,4 @@ class BooleanStringEnum(str, Enum): """ TRUE = "true" - FALSE = "false" \ No newline at end of file + FALSE = "false" diff --git a/plugins/module_utils/api_endpoints/local_user.py b/plugins/module_utils/api_endpoints/local_user.py index cae1326b..72639495 100644 --- a/plugins/module_utils/api_endpoints/local_user.py +++ b/plugins/module_utils/api_endpoints/local_user.py @@ -16,11 +16,12 @@ __metaclass__ = type # pylint: disable=invalid-name from typing import Literal, Union, Tuple, Any, Final -from .mixins import LoginIdMixin -from .enums import VerbEnum -from .base import NDBaseSmartEndpoint, NDBasePath +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.mixins import LoginIdMixin +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.enums import VerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint, NDBasePath from pydantic import Field -from ..types import IdentifierKey +from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey + class _EpApiV1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseSmartEndpoint): """ @@ -105,7 +106,7 @@ class EpApiV1InfraAaaLocalUsersPost(_EpApiV1InfraAaaLocalUsersBase): """ class_name: Literal["EpApiV1InfraAaaLocalUsersPost"] = Field( - default="EpApiV1InfraAaaLocalUsersPost", + default="EpApiV1InfraAaaLocalUsersPost", description="Class name for backward compatibility", frozen=True, ) @@ -136,7 +137,7 @@ class EpApiV1InfraAaaLocalUsersPut(_EpApiV1InfraAaaLocalUsersBase): """ class_name: Literal["EpApiV1InfraAaaLocalUsersPut"] = Field( - default="EpApiV1InfraAaaLocalUsersPut", + default="EpApiV1InfraAaaLocalUsersPut", description="Class name for backward compatibility", frozen=True, ) diff --git 
a/plugins/module_utils/api_endpoints/mixins.py b/plugins/module_utils/api_endpoints/mixins.py index 8ff3218f..9516c9ce 100644 --- a/plugins/module_utils/api_endpoints/mixins.py +++ b/plugins/module_utils/api_endpoints/mixins.py @@ -22,4 +22,4 @@ class LoginIdMixin(BaseModel): """Mixin for endpoints that require login_id parameter.""" - login_id: Optional[str] = Field(default=None, min_length=1, description="Login ID") \ No newline at end of file + login_id: Optional[str] = Field(default=None, min_length=1, description="Login ID") diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index 784a7f51..afa0a2b0 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -13,17 +13,18 @@ from types import MappingProxyType from copy import deepcopy -class NDConstantMapping(Dict): +class NDConstantMapping(Dict): def __init__(self, data: Dict): self.new_dict = deepcopy(data) - for k,v in data.items(): + for k, v in data.items(): self.new_dict[v] = k self.new_dict = MappingProxyType(self.new_dict) - + def get_dict(self): return self.new_dict + OBJECT_TYPES = { "tenant": "OST_TENANT", "vrf": "OST_VRF", diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 7b569a58..94fb9cc5 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -19,13 +19,14 @@ class NDBaseModel(BaseModel, ABC): """ Base model for all Nexus Dashboard API objects. 
- + Supports three identifier strategies: - single: One unique required field (e.g., ["login_id"]) - composite: Multiple required fields as tuple (e.g., ["device", "interface"]) - hierarchical: Priority-ordered fields (e.g., ["uuid", "name"]) - singleton: no identifiers required (e.g., only a single instance can exist in Nexus Dasboard) """ + # TODO: revisit initial Model Configurations (low priority) model_config = ConfigDict( str_strip_whitespace=True, @@ -33,14 +34,14 @@ class NDBaseModel(BaseModel, ABC): validate_assignment=True, populate_by_name=True, arbitrary_types_allowed=True, - extra='allow', # NOTE: enabled extra: allows to add extra Field infos for generating Ansible argument_spec and Module Docs + extra="allow", # NOTE: enabled extra: allows to add extra Field infos for generating Ansible argument_spec and Module Docs ) # TODO: Revisit identifiers strategy (low priority) identifiers: ClassVar[Optional[List[str]]] = None # TODO: Revisit no identifiers strategy naming (`singleton` -> `unique`, `unnamed`) (low priority) identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" - + # Optional: fields to exclude from diffs (e.g., passwords) exclude_from_diff: ClassVar[List] = [] # TODO: To be removed in the future (see local_user model) @@ -52,7 +53,7 @@ def __init_subclass__(cls, **kwargs): Enforce configuration for identifiers definition. """ super().__init_subclass__(**kwargs) - + # Skip enforcement for nested models if cls.__name__ in ["NDNestedModel"] or any(base.__name__ == "NDNestedModel" for base in cls.__mro__): return @@ -73,7 +74,7 @@ def to_payload(self, **kwargs) -> Dict[str, Any]: Convert model to API payload format. """ return self.model_dump(by_alias=True, exclude_none=True, **kwargs) - + def to_config(self, **kwargs) -> Dict[str, Any]: """ Convert model to Ansible config format. 
@@ -83,11 +84,11 @@ def to_config(self, **kwargs) -> Dict[str, Any]: @classmethod def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: return cls.model_validate(response, by_alias=True, **kwargs) - + @classmethod def from_config(cls, ansible_config: Dict[str, Any], **kwargs) -> Self: return cls.model_validate(ansible_config, by_name=True, **kwargs) - + # TODO: Revisit this function when revisiting identifier strategy (low priority) def get_identifier_value(self, **kwargs) -> Union[str, int, Tuple[Any, ...]]: """ @@ -98,74 +99,61 @@ def get_identifier_value(self, **kwargs) -> Union[str, int, Tuple[Any, ...]]: """ if not self.identifiers and self.identifier_strategy != "singleton": raise ValueError(f"{self.__class__.__name__} must have identifiers defined with its current identifier strategy: `{self.identifier_strategy}`") - + if self.identifier_strategy == "single": value = getattr(self, self.identifiers[0], None) if value is None: - raise ValueError( - f"Single identifier field '{self.identifiers[0]}' is None" - ) + raise ValueError(f"Single identifier field '{self.identifiers[0]}' is None") return value - + elif self.identifier_strategy == "composite": values = [] missing = [] - + for field in self.identifiers: value = getattr(self, field, None) if value is None: missing.append(field) values.append(value) - + # NOTE: might be redefined with Pydantic (low priority) if missing: - raise ValueError( - f"Composite identifier fields {missing} are None. " - f"All required: {self.identifiers}" - ) - + raise ValueError(f"Composite identifier fields {missing} are None. 
" f"All required: {self.identifiers}") + return tuple(values) - + elif self.identifier_strategy == "hierarchical": for field in self.identifiers: value = getattr(self, field, None) if value is not None: return (field, value) - - raise ValueError( - f"No non-None value in hierarchical fields {self.identifiers}" - ) - + + raise ValueError(f"No non-None value in hierarchical fields {self.identifiers}") + # TODO: Revisit condition when there is no identifiers (low priority) elif self.identifier_strategy == "singleton": return None - + else: raise ValueError(f"Unknown identifier strategy: {self.identifier_strategy}") - def to_diff_dict(self, **kwargs) -> Dict[str, Any]: """ Export for diff comparison (excludes sensitive fields). """ - return self.model_dump( - by_alias=True, - exclude_none=True, - exclude=set(self.exclude_from_diff), - **kwargs - ) - + return self.model_dump(by_alias=True, exclude_none=True, exclude=set(self.exclude_from_diff), **kwargs) + # NOTE: initialize and return a deep copy of the instance? # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? -> similar to NDCOnfigCollection... 
-> add argument to make it optional either replace def merge(self, other_model: "NDBaseModel", **kwargs) -> Self: if not isinstance(other_model, type(self)): # TODO: Change error message return TypeError("models are not of the same type.") - + for field, value in other_model: if value is None: continue - + current_value = getattr(self, field) if isinstance(current_value, NDBaseModel) and isinstance(value, NDBaseModel): setattr(self, field, current_value.merge(value)) diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 713d6040..e759a6fb 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -8,26 +8,25 @@ __metaclass__ = type -from pydantic import Field, SecretStr, model_serializer, field_serializer, field_validator, model_validator, computed_field from types import MappingProxyType from typing import List, Dict, Any, Optional, ClassVar, Literal from typing_extensions import Self - -# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel -from .base import NDBaseModel -from .nested import NDNestedModel -from ..constants import NDConstantMapping +from pydantic import Field, SecretStr, model_serializer, field_serializer, field_validator, model_validator, computed_field +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from ansible_collections.cisco.nd.plugins.module_utils.constants import NDConstantMapping # Constant defined here as it is only used in this model -USER_ROLES_MAPPING = NDConstantMapping({ - "fabric_admin": "fabric-admin", - "observer": "observer", - "super_admin": "super-admin", - "support_engineer": "support-engineer", - "approver": "approver", - 
"designer": "designer", -}).get_dict() +USER_ROLES_MAPPING = NDConstantMapping( + { + "fabric_admin": "fabric-admin", + "observer": "observer", + "super_admin": "super-admin", + "support_engineer": "support-engineer", + "approver": "approver", + "designer": "designer", + } +).get_dict() class LocalUserSecurityDomainModel(NDNestedModel): @@ -41,14 +40,7 @@ class LocalUserSecurityDomainModel(NDNestedModel): @model_serializer() def serialize_model(self) -> Dict: - return { - self.name: { - "roles": [ - USER_ROLES_MAPPING.get(role, role) - for role in (self.roles or []) - ] - } - } + return {self.name: {"roles": [USER_ROLES_MAPPING.get(role, role) for role in (self.roles or [])]}} # NOTE: Deserialization defined in `LocalUserModel` due to API response complexity @@ -60,7 +52,7 @@ class LocalUserModel(NDBaseModel): Identifier: login_id (single field) """ - + # Identifier configuration # TODO: Revisit this identifiers strategy (low priority) identifiers: ClassVar[Optional[List[str]]] = ["login_id"] @@ -69,11 +61,8 @@ class LocalUserModel(NDBaseModel): # Keys management configurations # TODO: Revisit these configurations (low priority) exclude_from_diff: ClassVar[List[str]] = ["user_password"] - unwanted_keys: ClassVar[List]= [ - ["passwordPolicy", "passwordChangeTime"], # Nested path - ["userID"] # Simple key - ] - + unwanted_keys: ClassVar[List] = [["passwordPolicy", "passwordChangeTime"], ["userID"]] # Nested path # Simple key + # Fields # NOTE: `alias` are NOT the ansible aliases. 
they are the equivalent attribute's names from the API spec # TODO: use extra for generating argument_spec (low priority) @@ -96,7 +85,7 @@ def password_policy(self) -> Optional[Dict[str, int]]: """Computed nested structure for API payload.""" if self.reuse_limitation is None and self.time_interval_limitation is None: return None - + policy = {} if self.reuse_limitation is not None: policy["reuseLimitation"] = self.reuse_limitation @@ -108,7 +97,6 @@ def password_policy(self) -> Optional[Dict[str, int]]: def serialize_password(self, value: Optional[SecretStr]) -> Optional[str]: return value.get_secret_value() if value else None - @field_serializer("security_domains") def serialize_domains(self, value: Optional[List[LocalUserSecurityDomainModel]]) -> Optional[Dict]: # NOTE: exclude `None` values and empty list (-> should we exclude empty list?) @@ -119,9 +107,7 @@ def serialize_domains(self, value: Optional[List[LocalUserSecurityDomainModel]]) for domain in value: domains_dict.update(domain.to_payload()) - return { - "domains": domains_dict - } + return {"domains": domains_dict} # -- Deserialization (API response / Ansible payload -> Model instance) -- @@ -132,17 +118,17 @@ def deserialize_password_policy(cls, data: Any) -> Any: return data password_policy = data.get("passwordPolicy") - + if password_policy and isinstance(password_policy, dict): if "reuseLimitation" in password_policy: data["reuse_limitation"] = password_policy["reuseLimitation"] if "timeIntervalLimitation" in password_policy: data["time_interval_limitation"] = password_policy["timeIntervalLimitation"] - + # Remove the nested structure from data to avoid conflicts # (since it's a computed field, not a real field) data.pop("passwordPolicy", None) - + return data @field_validator("security_domains", mode="before") @@ -150,24 +136,21 @@ def deserialize_password_policy(cls, data: Any) -> Any: def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: if value is None: return None - + # If already 
in list format (Ansible module representation), return as-is if isinstance(value, list): return value - + # If in the nested dict format (API representation) if isinstance(value, dict) and "domains" in value: domains_dict = value["domains"] domains_list = [] - + for domain_name, domain_data in domains_dict.items(): - domains_list.append({ - "name": domain_name, - "roles": [USER_ROLES_MAPPING.get(role, role) for role in domain_data.get("roles", [])] - }) - + domains_list.append({"name": domain_name, "roles": [USER_ROLES_MAPPING.get(role, role) for role in domain_data.get("roles", [])]}) + return domains_list - + return value # -- Extra -- diff --git a/plugins/module_utils/models/nested.py b/plugins/module_utils/models/nested.py index f2560819..0573e5f8 100644 --- a/plugins/module_utils/models/nested.py +++ b/plugins/module_utils/models/nested.py @@ -9,7 +9,7 @@ __metaclass__ = type from typing import List, ClassVar -from .base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel class NDNestedModel(NDBaseModel): diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 364519b8..1aa0e2ec 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -10,27 +10,26 @@ from typing import TypeVar, Generic, Optional, List, Dict, Any, Union, Tuple, Literal, Callable from copy import deepcopy +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.utils import issubset +from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey -# TODO: To be replaced with: from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from .models.base import NDBaseModel -from .utils import issubset -from .types import IdentifierKey # Type aliases -ModelType = TypeVar('ModelType', bound=NDBaseModel) 
+ModelType = TypeVar("ModelType", bound=NDBaseModel) class NDConfigCollection(Generic[ModelType]): """ Nexus Dashboard configuration collection for NDBaseModel instances. """ - + def __init__(self, model_class: ModelType, items: Optional[List[ModelType]] = None): """ Initialize collection. """ self._model_class: ModelType = model_class - + # Dual storage self._items: List[ModelType] = [] self._index: Dict[IdentifierKey, int] = {} @@ -38,7 +37,7 @@ def __init__(self, model_class: ModelType, items: Optional[List[ModelType]] = No if items: for item in items: self.add(item) - + # TODO: might not be necessary def _extract_key(self, item: ModelType) -> IdentifierKey: """ @@ -48,7 +47,7 @@ def _extract_key(self, item: ModelType) -> IdentifierKey: return item.get_identifier_value() except Exception as e: raise ValueError(f"Failed to extract identifier: {e}") from e - + # TODO: optimize it -> only needed for delete method (low priority) def _rebuild_index(self) -> None: """Rebuild index from scratch (O(n) operation).""" @@ -56,55 +55,47 @@ def _rebuild_index(self) -> None: for index, item in enumerate(self._items): key = self._extract_key(item) self._index[key] = index - + # Core Operations - + def add(self, item: ModelType) -> IdentifierKey: """ Add item to collection (O(1) operation). """ if not isinstance(item, self._model_class): - raise TypeError( - f"Item must be instance of {self._model_class.__name__}, " - f"got {type(item).__name__}" - ) - + raise TypeError(f"Item must be instance of {self._model_class.__name__}, " f"got {type(item).__name__}") + key = self._extract_key(item) - + if key in self._index: - raise ValueError( - f"Item with identifier {key} already exists. Use replace() to update" - ) - + raise ValueError(f"Item with identifier {key} already exists. 
Use replace() to update") + position = len(self._items) self._items.append(item) self._index[key] = position - + return key - + def get(self, key: IdentifierKey) -> Optional[ModelType]: """ Get item by identifier key (O(1) operation). """ index = self._index.get(key) return self._items[index] if index is not None else None - + def replace(self, item: ModelType) -> bool: """ Replace existing item with same identifier (O(1) operation). """ if not isinstance(item, self._model_class): - raise TypeError( - f"Item must be instance of {self._model_class.__name__}, " - f"got {type(item).__name__}" - ) - + raise TypeError(f"Item must be instance of {self._model_class.__name__}, " f"got {type(item).__name__}") + key = self._extract_key(item) index = self._index.get(key) - + if index is None: return False - + self._items[index] = item return True @@ -114,7 +105,7 @@ def merge(self, item: ModelType) -> ModelType: """ key = self._extract_key(item) existing = self.get(key) - + if existing is None: self.add(item) return item @@ -128,17 +119,17 @@ def delete(self, key: IdentifierKey) -> bool: Delete item by identifier (O(n) operation due to index rebuild) """ index = self._index.get(key) - + if index is None: return False - + del self._items[index] self._rebuild_index() - + return True - + # Diff Operations - + # NOTE: Maybe add a similar one in the NDBaseModel (-> but is it necessary?) 
def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "changed"]: """ @@ -148,9 +139,9 @@ def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "cha key = self._extract_key(new_item) except ValueError: return "new" - + existing = self.get(key) - + if existing is None: return "new" @@ -158,16 +149,16 @@ def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "cha existing_data = existing.to_diff_dict() new_data = new_item.to_diff_dict() is_subset = issubset(new_data, existing_data) - + return "no_diff" if is_subset else "changed" - + def get_diff_collection(self, other: "NDConfigCollection[ModelType]") -> bool: """ Check if two collections differ. """ if not isinstance(other, NDConfigCollection): raise TypeError("Argument must be NDConfigCollection") - + if len(self) != len(other): return True @@ -178,9 +169,9 @@ def get_diff_collection(self, other: "NDConfigCollection[ModelType]") -> bool: for key in self.keys(): if other.get(key) is None: return True - + return False - + def get_diff_identifiers(self, other: "NDConfigCollection[ModelType]") -> List[IdentifierKey]: """ Get identifiers in self but not in other. @@ -190,11 +181,11 @@ def get_diff_identifiers(self, other: "NDConfigCollection[ModelType]") -> List[I return list(current_keys - other_keys) # Collection Operations - + def __len__(self) -> int: """Return number of items.""" return len(self._items) - + def __iter__(self): """Iterate over items.""" return iter(self._items) @@ -205,10 +196,7 @@ def keys(self) -> List[IdentifierKey]: def copy(self) -> "NDConfigCollection[ModelType]": """Create deep copy of collection.""" - return NDConfigCollection( - model_class=self._model_class, - items=deepcopy(self._items) - ) + return NDConfigCollection(model_class=self._model_class, items=deepcopy(self._items)) # Collection Serialization @@ -217,13 +205,13 @@ def to_ansible_config(self, **kwargs) -> List[Dict]: Export as an Ansible config. 
""" return [item.to_config(**kwargs) for item in self._items] - + def to_payload_list(self, **kwargs) -> List[Dict[str, Any]]: """ Export as list of API payloads. """ return [item.to_payload(**kwargs) for item in self._items] - + @classmethod def from_ansible_config(cls, data: List[Dict], model_class: type[ModelType], **kwargs) -> "NDConfigCollection[ModelType]": """ @@ -231,7 +219,7 @@ def from_ansible_config(cls, data: List[Dict], model_class: type[ModelType], **k """ items = [model_class.from_config(item_data, **kwargs) for item_data in data] return cls(model_class=model_class, items=items) - + @classmethod def from_api_response(cls, response_data: List[Dict[str, Any]], model_class: type[ModelType], **kwargs) -> "NDConfigCollection[ModelType]": """ diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 5b1f770c..be5849d4 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -12,31 +12,25 @@ from typing import Optional, List, Dict, Any, Literal, Type from pydantic import ValidationError from ansible.module_utils.basic import AnsibleModule - -# TODO: To be replaced with: -# from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule -# from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection -# from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator -# from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey -# from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED -from .nd import NDModule -from .nd_config_collection import NDConfigCollection -from .orchestrators.base import NDBaseOrchestrator -from .types import IdentifierKey -from .constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED +from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule +from 
ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey +from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED +# TODO: Revisit StateMachine when there is more arguments than config (e.g., "fabric" and "config" for switches config) +# TODO: class NDStateMachine(NDModule): """ Generic Network Resource Module for Nexus Dashboard. """ - + def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchestrator]): """ Initialize the Network Resource Module. """ - # TODO: Revisit Module initialization and configuration - # nd_module = NDModule() + # TODO: Revisit Module initialization and configuration with rest_send self.module = module self.nd_module = NDModule(module) @@ -51,18 +45,13 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest self.state = self.module.params["state"] self.ansible_config = self.module.params.get("config", []) - # Initialize collections - # TODO: Revisit collections initialization especially `init_all_data` (medium priority) # TODO: Revisit class variables `previous`, `existing`, etc... 
(medium priority) self.nd_config_collection = NDConfigCollection[self.model_class] try: init_all_data = self.model_orchestrator.query_all() - self.existing = self.nd_config_collection.from_api_response( - response_data=init_all_data, - model_class=self.model_class - ) + self.existing = self.nd_config_collection.from_api_response(response_data=init_all_data, model_class=self.model_class) # Save previous state self.previous = self.existing.copy() self.proposed = self.nd_config_collection(model_class=self.model_class) @@ -74,30 +63,23 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest item = self.model_class.from_config(config) self.proposed.add(item) except ValidationError as e: - self.fail_json( - msg=f"Invalid configuration: {e}", - config=config, - validation_errors=e.errors() - ) + self.fail_json(msg=f"Invalid configuration: {e}", config=config, validation_errors=e.errors()) return except Exception as e: - self.fail_json( - msg=f"Initialization failed: {str(e)}", - error=str(e) - ) + self.fail_json(msg=f"Initialization failed: {str(e)}", error=str(e)) # Logging # NOTE: format log placeholder # TODO: use a proper logger (low priority) def format_log( - self, - identifier: IdentifierKey, - operation_status: Literal["no_change", "created", "updated", "deleted"], - before: Optional[Dict[str, Any]] = None, - after: Optional[Dict[str, Any]] = None, - payload: Optional[Dict[str, Any]] = None, - ) -> None: + self, + identifier: IdentifierKey, + operation_status: Literal["no_change", "created", "updated", "deleted"], + before: Optional[Dict[str, Any]] = None, + after: Optional[Dict[str, Any]] = None, + payload: Optional[Dict[str, Any]] = None, + ) -> None: """ Create and append a log entry. 
""" @@ -108,18 +90,15 @@ def format_log( "after": after, "payload": payload, } - + # Add HTTP details if not in check mode if not self.module.check_mode and self.nd_module.url is not None: - log_entry.update({ - "method": self.nd_module.method, - "response": self.nd_module.response, - "status": self.nd_module.status, - "url": self.nd_module.url - }) - + log_entry.update( + {"method": self.nd_module.method, "response": self.nd_module.response, "status": self.nd_module.status, "url": self.nd_module.url} + ) + self.nd_logs.append(log_entry) - + # State Management (core function) # TODO: adapt all `manage` functions to endpoint/orchestrator strategies (Top priority) def manage_state(self) -> None: @@ -129,17 +108,17 @@ def manage_state(self) -> None: # Execute state operations if self.state in ["merged", "replaced", "overridden"]: self._manage_create_update_state() - + if self.state == "overridden": self._manage_override_deletions() - + elif self.state == "deleted": self._manage_delete_state() - + # TODO: not needed with Ansible `argument_spec` validation. 
Keep it for now but needs to be removed (low priority) + # TODO: boil down an Exception instead of using `fail_json` method else: self.fail_json(msg=f"Invalid state: {self.state}") - def _manage_create_update_state(self) -> None: """ @@ -152,7 +131,7 @@ def _manage_create_update_state(self) -> None: try: # Determine diff status diff_status = self.existing.get_diff_config(proposed_item) - + # No changes needed if diff_status == "no_diff": self.format_log( @@ -162,7 +141,7 @@ def _manage_create_update_state(self) -> None: after=existing_config, ) continue - + # Prepare final config based on state if self.state == "merged": # Merge with existing @@ -187,7 +166,7 @@ def _manage_create_update_state(self) -> None: response = self.model_orchestrator.create(final_item) self.sent.add(final_item) operation_status = "created" - + # Log operation self.format_log( identifier=identifier, @@ -196,32 +175,27 @@ def _manage_create_update_state(self) -> None: after=self.model_class.model_validate(response).to_config() if not self.module.check_mode else final_item.to_config(), payload=final_item.to_payload(), ) - + except Exception as e: error_msg = f"Failed to process {identifier}: {e}" - + self.format_log( identifier=identifier, operation_status="no_change", before=existing_config, after=existing_config, ) - + if not self.module.params.get("ignore_errors", False): - self.fail_json( - msg=error_msg, - identifier=str(identifier), - error=str(e) - ) + self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) return - - # TODO: Refactor with orchestrator (Top priority) + def _manage_override_deletions(self) -> None: """ Delete items not in proposed config (for overridden state). 
""" diff_identifiers = self.previous.get_diff_identifiers(self.proposed) - + for identifier in diff_identifiers: try: existing_item = self.existing.get(identifier) @@ -231,37 +205,31 @@ def _manage_override_deletions(self) -> None: # Execute delete if not self.module.check_mode: response = self.model_orchestrator.delete(existing_item) - + # Remove from collection self.existing.delete(identifier) - + # Log deletion self.format_log( identifier=identifier, operation_status="deleted", before=existing_item.to_config(), after={}, - ) - + except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" - + if not self.module.params.get("ignore_errors", False): - self.fail_json( - msg=error_msg, - identifier=str(identifier), - error=str(e) - ) + self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) return - - # TODO: Refactor with orchestrator (Top priority) + def _manage_delete_state(self) -> None: """Handle deleted state.""" for proposed_item in self.proposed: try: identifier = proposed_item.get_identifier_value() - + existing_item = self.existing.get(identifier) if not existing_item: # Already deleted or doesn't exist @@ -272,14 +240,14 @@ def _manage_delete_state(self) -> None: after={}, ) continue - + # Execute delete if not self.module.check_mode: response = self.model_orchestrator.delete(existing_item) - + # Remove from collection self.existing.delete(identifier) - + # Log deletion self.format_log( identifier=identifier, @@ -287,18 +255,14 @@ def _manage_delete_state(self) -> None: before=existing_item.to_config(), after={}, ) - + except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" - + if not self.module.params.get("ignore_errors", False): - self.fail_json( - msg=error_msg, - identifier=str(identifier), - error=str(e) - ) + self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) return - + # Output Formatting # TODO: move to separate Class (results) -> align it with rest_send PR # TODO: return a defined 
ordered list of config (for integration test) @@ -306,36 +270,36 @@ def add_logs_and_outputs(self) -> None: """Add logs and outputs to module result based on output_level.""" output_level = self.module.params.get("output_level", "normal") state = self.module.params.get("state") - + # Add previous state for certain states and output levels if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: if output_level in ("debug", "info"): self.result["previous"] = self.previous.to_ansible_config() - + # Check if there were changes if self.previous.get_diff_collection(self.existing): self.result["changed"] = True - + # Add stdout if present if self.nd_module.stdout: self.result["stdout"] = self.nd_module.stdout - + # Add debug information if output_level == "debug": self.result["nd_logs"] = self.nd_logs - + if self.nd_module.url is not None: self.result["httpapi_logs"] = self.nd_module.httpapi_logs - + if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: self.result["sent"] = self.sent.to_payload_list() self.result["proposed"] = self.proposed.to_ansible_config() - + # Always include current state self.result["current"] = self.existing.to_ansible_config() - + # Module Exit Methods - + def fail_json(self, msg: str, **kwargs) -> None: """ Exit module with failure. @@ -343,26 +307,23 @@ def fail_json(self, msg: str, **kwargs) -> None: self.add_logs_and_outputs() self.result.update(**kwargs) self.module.fail_json(msg=msg, **self.result) - + def exit_json(self, **kwargs) -> None: """ Exit module successfully. 
""" self.add_logs_and_outputs() - + # Add diff if module supports it if self.module._diff and self.result.get("changed") is True: try: # Use diff-safe dicts (excludes sensitive fields) before = [item.to_diff_dict() for item in self.previous] after = [item.to_diff_dict() for item in self.existing] - - self.result["diff"] = dict( - before=before, - after=after - ) + + self.result["diff"] = dict(before=before, after=after) except Exception: pass # Don't fail on diff generation - + self.result.update(**kwargs) self.module.exit_json(**self.result) diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 924ea4b0..f9a63fa1 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -8,11 +8,11 @@ __metaclass__ = type -from ..models.base import NDBaseModel -from ..nd import NDModule -from ..api_endpoints.base import NDBaseSmartEndpoint -from typing import Dict, List, Any, Union, ClassVar, Type, Optional from pydantic import BaseModel, ConfigDict +from typing import Dict, List, Any, Union, ClassVar, Type, Optional +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] @@ -20,7 +20,6 @@ # TODO: Revisit naming them "Orchestrator" class NDBaseOrchestrator(BaseModel): - model_config = ConfigDict( use_enum_values=True, validate_assignment=True, diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index 46a4ea07..04f7707f 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -8,12 +8,12 @@ __metaclass__ = type -from .base import NDBaseOrchestrator -from ..models.base import NDBaseModel 
-from ..models.local_user import LocalUserModel from typing import Dict, List, Any, Union, Type -from ..api_endpoints.base import NDBaseSmartEndpoint -from ..api_endpoints.local_user import ( +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.local_user import ( EpApiV1InfraAaaLocalUsersPost, EpApiV1InfraAaaLocalUsersPut, EpApiV1InfraAaaLocalUsersDelete, @@ -23,8 +23,8 @@ ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] -class LocalUserOrchestrator(NDBaseOrchestrator): +class LocalUserOrchestrator(NDBaseOrchestrator): model_class: Type[NDBaseModel] = LocalUserModel create_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPost diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index a7c1d3dc..0bf7cfc8 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -37,22 +37,22 @@ def issubset(subset: Any, superset: Any) -> bool: """Check if subset is contained in superset.""" if type(subset) is not type(superset): return False - + if not isinstance(subset, dict): if isinstance(subset, list): return all(item in superset for item in subset) return subset == superset - + for key, value in subset.items(): if value is None: continue - + if key not in superset: return False - + if not issubset(value, superset[key]): return False - + return True @@ -60,12 +60,12 @@ def issubset(subset: Any, superset: Any) -> bool: def remove_unwanted_keys(data: Dict, unwanted_keys: List[Union[str, List[str]]]) -> Dict: """Remove unwanted keys from dict (supports nested paths).""" data = deepcopy(data) - + for key in 
unwanted_keys: if isinstance(key, str): if key in data: del data[key] - + elif isinstance(key, list) and len(key) > 0: try: parent = data @@ -79,5 +79,5 @@ def remove_unwanted_keys(data: Dict, unwanted_keys: List[Union[str, List[str]]]) del parent[key[-1]] except (KeyError, TypeError, IndexError): pass - + return data diff --git a/plugins/modules/nd_api_key.py b/plugins/modules/nd_api_key.py index c00428a9..1a3e4823 100644 --- a/plugins/modules/nd_api_key.py +++ b/plugins/modules/nd_api_key.py @@ -146,7 +146,6 @@ def main(): nd.existing = nd.previous = nd.query_objs(path, key="apiKeys") if state == "present": - if len(api_key_name) > 32 or len(api_key_name) < 1: nd.fail_json("A length of 1 to 32 characters is allowed.") elif re.search(r"[^a-zA-Z0-9_.-]", api_key_name): diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index b6acee72..a6972c07 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -175,15 +175,10 @@ """ from ansible.module_utils.basic import AnsibleModule -# TODO: To be replaced with: -# from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec -# from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import NDStateMachine -# from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel -# from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.local_user import LocalUserOrchestrator -from ..module_utils.nd import nd_argument_spec -from ..module_utils.nd_state_machine import NDStateMachine -from ..module_utils.models.local_user import LocalUserModel -from ..module_utils.orchestrators.local_user import LocalUserOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec +from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import NDStateMachine +from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel +from 
ansible_collections.cisco.nd.plugins.module_utils.orchestrators.local_user import LocalUserOrchestrator def main(): @@ -196,17 +191,17 @@ def main(): ) try: - # Create NDNetworkResourceModule with LocalUserModel - nd_module = NDStateMachine( + # Initialize StateMachine + nd_state_machine = NDStateMachine( module=module, model_orchestrator=LocalUserOrchestrator, ) - + # Manage state - nd_module.manage_state() + nd_state_machine.manage_state() + + nd_state_machine.exit_json() - nd_module.exit_json() - except Exception as e: module.fail_json(msg=f"Module execution failed: {str(e)}") From 1d96db11cc1bd04dea83118b2053efd57fc2b04e Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 3 Mar 2026 12:02:02 -0500 Subject: [PATCH 044/109] [ignore] Clean code for sanity purposes (except Pydantic import checks. --- plugins/module_utils/api_endpoints/base.py | 2 +- plugins/module_utils/api_endpoints/enums.py | 5 +++++ plugins/module_utils/api_endpoints/local_user.py | 4 ++-- plugins/module_utils/api_endpoints/mixins.py | 2 +- plugins/module_utils/models/base.py | 5 +++-- plugins/module_utils/models/local_user.py | 2 -- plugins/module_utils/nd_config_collection.py | 3 +-- plugins/module_utils/nd_state_machine.py | 1 - plugins/module_utils/orchestrators/base.py | 6 ++---- plugins/module_utils/orchestrators/local_user.py | 8 +++----- plugins/module_utils/orchestrators/types.py | 13 +++++++++++++ plugins/module_utils/types.py | 1 - 12 files changed, 31 insertions(+), 21 deletions(-) create mode 100644 plugins/module_utils/orchestrators/types.py diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py index 954c1f6a..8428ffe8 100644 --- a/plugins/module_utils/api_endpoints/base.py +++ b/plugins/module_utils/api_endpoints/base.py @@ -11,7 +11,7 @@ from abc import ABC, abstractmethod from pydantic import BaseModel, ConfigDict -from typing import Final, Union, Tuple, Any +from typing import Final from 
ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.types import IdentifierKey diff --git a/plugins/module_utils/api_endpoints/enums.py b/plugins/module_utils/api_endpoints/enums.py index ced62ba7..18a7f5eb 100644 --- a/plugins/module_utils/api_endpoints/enums.py +++ b/plugins/module_utils/api_endpoints/enums.py @@ -7,6 +7,11 @@ """ Enums used in api_endpoints. """ + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + from enum import Enum diff --git a/plugins/module_utils/api_endpoints/local_user.py b/plugins/module_utils/api_endpoints/local_user.py index 72639495..890b38e7 100644 --- a/plugins/module_utils/api_endpoints/local_user.py +++ b/plugins/module_utils/api_endpoints/local_user.py @@ -13,9 +13,9 @@ from __future__ import absolute_import, division, print_function -__metaclass__ = type # pylint: disable=invalid-name +__metaclass__ = type -from typing import Literal, Union, Tuple, Any, Final +from typing import Literal, Final from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.mixins import LoginIdMixin from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.enums import VerbEnum from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint, NDBasePath diff --git a/plugins/module_utils/api_endpoints/mixins.py b/plugins/module_utils/api_endpoints/mixins.py index 9516c9ce..56cdcfc5 100644 --- a/plugins/module_utils/api_endpoints/mixins.py +++ b/plugins/module_utils/api_endpoints/mixins.py @@ -15,7 +15,7 @@ __metaclass__ = type # pylint: disable=invalid-name -from typing import TYPE_CHECKING, Optional +from typing import Optional from pydantic import BaseModel, Field diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 94fb9cc5..8cdcc765 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -8,7 +8,7 @@ __metaclass__ = type -from abc import ABC, abstractmethod 
+from abc import ABC from pydantic import BaseModel, ConfigDict from typing import List, Dict, Any, ClassVar, Tuple, Union, Literal, Optional from typing_extensions import Self @@ -144,7 +144,8 @@ def to_diff_dict(self, **kwargs) -> Dict[str, Any]: return self.model_dump(by_alias=True, exclude_none=True, exclude=set(self.exclude_from_diff), **kwargs) # NOTE: initialize and return a deep copy of the instance? - # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? -> similar to NDCOnfigCollection... -> add argument to make it optional either replace + # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? + # -> similar to NDCOnfigCollection... -> add argument to make it optional either replace def merge(self, other_model: "NDBaseModel", **kwargs) -> Self: if not isinstance(other_model, type(self)): # TODO: Change error message diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index e759a6fb..fe2f2bb5 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -8,9 +8,7 @@ __metaclass__ = type -from types import MappingProxyType from typing import List, Dict, Any, Optional, ClassVar, Literal -from typing_extensions import Self from pydantic import Field, SecretStr, model_serializer, field_serializer, field_validator, model_validator, computed_field from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 1aa0e2ec..5fd9886d 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -8,13 +8,12 @@ __metaclass__ = type -from typing import TypeVar, Generic, Optional, List, Dict, Any, Union, Tuple, Literal, Callable +from typing import TypeVar, 
Generic, Optional, List, Dict, Any, Literal from copy import deepcopy from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.utils import issubset from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey - # Type aliases ModelType = TypeVar("ModelType", bound=NDBaseModel) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index be5849d4..923f0b69 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -8,7 +8,6 @@ __metaclass__ = type -from copy import deepcopy from typing import Optional, List, Dict, Any, Literal, Type from pydantic import ValidationError from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index f9a63fa1..4df0797d 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -9,13 +9,11 @@ __metaclass__ = type from pydantic import BaseModel, ConfigDict -from typing import Dict, List, Any, Union, ClassVar, Type, Optional +from typing import ClassVar, Type, Optional from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint - - -ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.types import ResponseType # TODO: Revisit naming them "Orchestrator" diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index 04f7707f..d30b29f8 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -8,11 +8,12 @@ 
__metaclass__ = type -from typing import Dict, List, Any, Union, Type +from typing import Type from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.types import ResponseType from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.local_user import ( EpApiV1InfraAaaLocalUsersPost, EpApiV1InfraAaaLocalUsersPut, @@ -21,9 +22,6 @@ ) -ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] - - class LocalUserOrchestrator(NDBaseOrchestrator): model_class: Type[NDBaseModel] = LocalUserModel @@ -33,7 +31,7 @@ class LocalUserOrchestrator(NDBaseOrchestrator): query_one_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet query_all_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet - def query_all(self): + def query_all(self) -> ResponseType: """ Custom query_all action to extract 'localusers' from response. 
""" diff --git a/plugins/module_utils/orchestrators/types.py b/plugins/module_utils/orchestrators/types.py new file mode 100644 index 00000000..b721c65b --- /dev/null +++ b/plugins/module_utils/orchestrators/types.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Union, List, Dict + +ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] diff --git a/plugins/module_utils/types.py b/plugins/module_utils/types.py index 124aedd5..3111a095 100644 --- a/plugins/module_utils/types.py +++ b/plugins/module_utils/types.py @@ -10,5 +10,4 @@ from typing import Any, Union, Tuple - IdentifierKey = Union[str, int, Tuple[Any, ...]] From 5d1f52f4ead919d3afe3968c587414c5c2639622 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 3 Mar 2026 12:18:35 -0500 Subject: [PATCH 045/109] [ignore] Restructure api_endpoints folder into endpoints -> v1. Fix some sanity issues. 
--- plugins/module_utils/api_endpoints/base.py | 180 ------------------ plugins/module_utils/api_endpoints/mixins.py | 25 --- plugins/module_utils/endpoints/base.py | 7 + .../{api_endpoints => endpoints}/enums.py | 2 +- plugins/module_utils/endpoints/mixins.py | 3 +- .../v1/infra_aaa_local_users.py} | 32 ++-- plugins/module_utils/nd_state_machine.py | 1 + plugins/module_utils/orchestrators/base.py | 4 +- .../module_utils/orchestrators/local_user.py | 24 +-- 9 files changed, 41 insertions(+), 237 deletions(-) delete mode 100644 plugins/module_utils/api_endpoints/base.py delete mode 100644 plugins/module_utils/api_endpoints/mixins.py rename plugins/module_utils/{api_endpoints => endpoints}/enums.py (97%) rename plugins/module_utils/{api_endpoints/local_user.py => endpoints/v1/infra_aaa_local_users.py} (74%) diff --git a/plugins/module_utils/api_endpoints/base.py b/plugins/module_utils/api_endpoints/base.py deleted file mode 100644 index 8428ffe8..00000000 --- a/plugins/module_utils/api_endpoints/base.py +++ /dev/null @@ -1,180 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Allen Robel (@allenrobel) -# Copyright: (c) 2026, Gaspard Micol (@gmicol) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -from abc import ABC, abstractmethod -from pydantic import BaseModel, ConfigDict -from typing import Final -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.types import IdentifierKey - - -# TODO: Rename it to APIEndpoint -# NOTE: This is a very minimalist endpoint package -> needs to be enhanced -class NDBaseSmartEndpoint(BaseModel, ABC): - # TODO: maybe to be modified in the future - model_config = ConfigDict(validate_assignment=True) - - # TODO: to remove - base_path: str - - @property - @abstractmethod - def path(self) -> str: - pass - - @property - @abstractmethod - def verb(self) -> str: - pass - - # 
TODO: Maybe to be modifed to be more Pydantic (low priority) - # TODO: Maybe change function's name (low priority) - # NOTE: function to set endpoints attribute fields from identifiers -> acts as the bridge between Models and Endpoints for API Request Orchestration - @abstractmethod - def set_identifiers(self, identifier: IdentifierKey = None): - pass - - -class NDBasePath: - """ - # Summary - - Centralized API Base Paths - - ## Description - - Provides centralized base path definitions for all ND API endpoints. - This allows API path changes to be managed in a single location. - - ## Usage - - ```python - # Get a complete base path - path = BasePath.control_fabrics("MyFabric", "config-deploy") - # Returns: /appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/MyFabric/config-deploy - - # Build custom paths - path = BasePath.v1("custom", "endpoint") - # Returns: /appcenter/cisco/ndfc/api/v1/custom/endpoint - ``` - - ## Design Notes - - - All base paths are defined as class constants for easy modification - - Helper methods compose paths from base constants - - Use these methods in Pydantic endpoint models to ensure consistency - - If NDFC changes base API paths, only this class needs updating - """ - - # Root API paths - NDFC_API: Final = "/appcenter/cisco/ndfc/api" - ND_INFRA_API: Final = "/api/v1/infra" - ONEMANAGE: Final = "/onemanage" - LOGIN: Final = "/login" - - @classmethod - def api(cls, *segments: str) -> str: - """ - # Summary - - Build path from NDFC API root. - - ## Parameters - - - segments: Path segments to append - - ## Returns - - - Complete path string - - ## Example - - ```python - path = BasePath.api("custom", "endpoint") - # Returns: /appcenter/cisco/ndfc/api/custom/endpoint - ``` - """ - if not segments: - return cls.NDFC_API - return f"{cls.NDFC_API}/{'/'.join(segments)}" - - @classmethod - def v1(cls, *segments: str) -> str: - """ - # Summary - - Build v1 API path. 
- - ## Parameters - - - segments: Path segments to append after v1 - - ## Returns - - - Complete v1 API path - - ## Example - - ```python - path = BasePath.v1("lan-fabric", "rest") - # Returns: /appcenter/cisco/ndfc/api/v1/lan-fabric/rest - ``` - """ - return cls.api("v1", *segments) - - @classmethod - def nd_infra(cls, *segments: str) -> str: - """ - # Summary - - Build ND infra API path. - - ## Parameters - - - segments: Path segments to append after /api/v1/infra - - ## Returns - - - Complete ND infra API path - - ## Example - - ```python - path = BasePath.nd_infra("aaa", "localUsers") - # Returns: /api/v1/infra/aaa/localUsers - ``` - """ - if not segments: - return cls.ND_INFRA_API - return f"{cls.ND_INFRA_API}/{'/'.join(segments)}" - - @classmethod - def nd_infra_aaa(cls, *segments: str) -> str: - """ - # Summary - - Build ND infra AAA API path. - - ## Parameters - - - segments: Path segments to append after aaa (e.g., "localUsers") - - ## Returns - - - Complete ND infra AAA path - - ## Example - - ```python - path = BasePath.nd_infra_aaa("localUsers") - # Returns: /api/v1/infra/aaa/localUsers - ``` - """ - return cls.nd_infra("aaa", *segments) diff --git a/plugins/module_utils/api_endpoints/mixins.py b/plugins/module_utils/api_endpoints/mixins.py deleted file mode 100644 index 56cdcfc5..00000000 --- a/plugins/module_utils/api_endpoints/mixins.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Allen Robel (@allenrobel) -# Copyright: (c) 2026, Gaspard Micol (@gmicol) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -""" -Reusable mixin classes for endpoint models. - -This module provides mixin classes that can be composed to add common -fields to endpoint models without duplication. 
-""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type # pylint: disable=invalid-name - -from typing import Optional -from pydantic import BaseModel, Field - - -class LoginIdMixin(BaseModel): - """Mixin for endpoints that require login_id parameter.""" - - login_id: Optional[str] = Field(default=None, min_length=1, description="Login ID") diff --git a/plugins/module_utils/endpoints/base.py b/plugins/module_utils/endpoints/base.py index 3ccdff1c..bfd59ee1 100644 --- a/plugins/module_utils/endpoints/base.py +++ b/plugins/module_utils/endpoints/base.py @@ -1,4 +1,5 @@ # Copyright: (c) 2026, Allen Robel (@arobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ @@ -22,6 +23,7 @@ Field, ) from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.types import IdentifierKey class NDEndpointBaseModel(BaseModel, ABC): @@ -129,3 +131,8 @@ def verb(self) -> HttpVerbEnum: None """ + + # NOTE: function to set endpoints attribute fields from identifiers -> acts as the bridge between Models and Endpoints for API Request Orchestration + @abstractmethod + def set_identifiers(self, identifier: IdentifierKey = None): + pass diff --git a/plugins/module_utils/api_endpoints/enums.py b/plugins/module_utils/endpoints/enums.py similarity index 97% rename from plugins/module_utils/api_endpoints/enums.py rename to plugins/module_utils/endpoints/enums.py index 18a7f5eb..802b8fe8 100644 --- a/plugins/module_utils/api_endpoints/enums.py +++ b/plugins/module_utils/endpoints/enums.py @@ -5,7 +5,7 @@ # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ -Enums used in api_endpoints. +Enums used in endpoints. 
""" from __future__ import absolute_import, division, print_function diff --git a/plugins/module_utils/endpoints/mixins.py b/plugins/module_utils/endpoints/mixins.py index 47695611..22d9a2dc 100644 --- a/plugins/module_utils/endpoints/mixins.py +++ b/plugins/module_utils/endpoints/mixins.py @@ -1,4 +1,5 @@ -# Copyright: (c) 2026, Allen Robel (@arobel) +# Copyright: (c) 2026, Allen Robel (@allenrobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ diff --git a/plugins/module_utils/api_endpoints/local_user.py b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py similarity index 74% rename from plugins/module_utils/api_endpoints/local_user.py rename to plugins/module_utils/endpoints/v1/infra_aaa_local_users.py index 890b38e7..1e1d7823 100644 --- a/plugins/module_utils/api_endpoints/local_user.py +++ b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py @@ -16,14 +16,14 @@ __metaclass__ = type from typing import Literal, Final -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.mixins import LoginIdMixin -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.enums import VerbEnum -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint, NDBasePath +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import LoginIdMixin +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.enums import VerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseSmartEndpoint, NDBasePath from pydantic import Field from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey -class _EpApiV1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseSmartEndpoint): +class _V1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseSmartEndpoint): """ Base class for ND Infra AAA Local Users endpoints. 
@@ -53,7 +53,7 @@ def set_identifiers(self, identifier: IdentifierKey = None): self.login_id = identifier -class EpApiV1InfraAaaLocalUsersGet(_EpApiV1InfraAaaLocalUsersBase): +class V1InfraAaaLocalUsersGet(_V1InfraAaaLocalUsersBase): """ # Summary @@ -74,8 +74,8 @@ class EpApiV1InfraAaaLocalUsersGet(_EpApiV1InfraAaaLocalUsersBase): - GET """ - class_name: Literal["EpApiV1InfraAaaLocalUsersGet"] = Field( - default="EpApiV1InfraAaaLocalUsersGet", + class_name: Literal["V1InfraAaaLocalUsersGet"] = Field( + default="V1InfraAaaLocalUsersGet", description="Class name for backward compatibility", frozen=True, ) @@ -86,7 +86,7 @@ def verb(self) -> VerbEnum: return VerbEnum.GET -class EpApiV1InfraAaaLocalUsersPost(_EpApiV1InfraAaaLocalUsersBase): +class V1InfraAaaLocalUsersPost(_V1InfraAaaLocalUsersBase): """ # Summary @@ -105,8 +105,8 @@ class EpApiV1InfraAaaLocalUsersPost(_EpApiV1InfraAaaLocalUsersBase): - POST """ - class_name: Literal["EpApiV1InfraAaaLocalUsersPost"] = Field( - default="EpApiV1InfraAaaLocalUsersPost", + class_name: Literal["V1InfraAaaLocalUsersPost"] = Field( + default="V1InfraAaaLocalUsersPost", description="Class name for backward compatibility", frozen=True, ) @@ -117,7 +117,7 @@ def verb(self) -> VerbEnum: return VerbEnum.POST -class EpApiV1InfraAaaLocalUsersPut(_EpApiV1InfraAaaLocalUsersBase): +class V1InfraAaaLocalUsersPut(_V1InfraAaaLocalUsersBase): """ # Summary @@ -136,8 +136,8 @@ class EpApiV1InfraAaaLocalUsersPut(_EpApiV1InfraAaaLocalUsersBase): - PUT """ - class_name: Literal["EpApiV1InfraAaaLocalUsersPut"] = Field( - default="EpApiV1InfraAaaLocalUsersPut", + class_name: Literal["V1InfraAaaLocalUsersPut"] = Field( + default="V1InfraAaaLocalUsersPut", description="Class name for backward compatibility", frozen=True, ) @@ -148,7 +148,7 @@ def verb(self) -> VerbEnum: return VerbEnum.PUT -class EpApiV1InfraAaaLocalUsersDelete(_EpApiV1InfraAaaLocalUsersBase): +class V1InfraAaaLocalUsersDelete(_V1InfraAaaLocalUsersBase): """ # Summary @@ -167,8 
+167,8 @@ class EpApiV1InfraAaaLocalUsersDelete(_EpApiV1InfraAaaLocalUsersBase): - DELETE """ - class_name: Literal["EpApiV1InfraAaaLocalUsersDelete"] = Field( - default="EpApiV1InfraAaaLocalUsersDelete", + class_name: Literal["V1InfraAaaLocalUsersDelete"] = Field( + default="V1InfraAaaLocalUsersDelete", description="Class name for backward compatibility", frozen=True, ) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 923f0b69..ae0a67ce 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -125,6 +125,7 @@ def _manage_create_update_state(self) -> None: """ for proposed_item in self.proposed: # Extract identifier + response = {} identifier = proposed_item.get_identifier_value() existing_config = self.existing.get(identifier).to_config() if self.existing.get(identifier) else {} try: diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 4df0797d..8c84de8e 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -12,8 +12,8 @@ from typing import ClassVar, Type, Optional from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.types import ResponseType +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseSmartEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.types import ResponseType # TODO: Revisit naming them "Orchestrator" diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index d30b29f8..bea4a486 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ 
b/plugins/module_utils/orchestrators/local_user.py @@ -12,24 +12,24 @@ from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.base import NDBaseSmartEndpoint -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.types import ResponseType -from ansible_collections.cisco.nd.plugins.module_utils.api_endpoints.local_user import ( - EpApiV1InfraAaaLocalUsersPost, - EpApiV1InfraAaaLocalUsersPut, - EpApiV1InfraAaaLocalUsersDelete, - EpApiV1InfraAaaLocalUsersGet, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseSmartEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.types import ResponseType +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra_aaa_local_users import ( + V1InfraAaaLocalUsersPost, + V1InfraAaaLocalUsersPut, + V1InfraAaaLocalUsersDelete, + V1InfraAaaLocalUsersGet, ) class LocalUserOrchestrator(NDBaseOrchestrator): model_class: Type[NDBaseModel] = LocalUserModel - create_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPost - update_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersPut - delete_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersDelete - query_one_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet - query_all_endpoint: Type[NDBaseSmartEndpoint] = EpApiV1InfraAaaLocalUsersGet + create_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersPost + update_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersPut + delete_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersDelete + query_one_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersGet + query_all_endpoint: 
Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersGet def query_all(self) -> ResponseType: """ From 039103ee8d322b85dc7b2e74449b902fb9673d65 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 3 Mar 2026 15:06:04 -0500 Subject: [PATCH 046/109] [ignore] Remove NDModule inheritence from NDStateMachine. Add first iteration of (Mock Pydantic objects/methods) to pass sanity checks for Pydantic importation. --- plugins/module_utils/nd_state_machine.py | 6 +- plugins/module_utils/pydantic_compat.py | 200 +++++++++++++++++++++++ 2 files changed, 203 insertions(+), 3 deletions(-) create mode 100644 plugins/module_utils/pydantic_compat.py diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index ae0a67ce..e68010fb 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -19,8 +19,8 @@ # TODO: Revisit StateMachine when there is more arguments than config (e.g., "fabric" and "config" for switches config) -# TODO: -class NDStateMachine(NDModule): +# TODO: Remove inheritence from NDModule (Top Priority) +class NDStateMachine: """ Generic Network Resource Module for Nexus Dashboard. """ @@ -31,7 +31,7 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest """ # TODO: Revisit Module initialization and configuration with rest_send self.module = module - self.nd_module = NDModule(module) + self.nd_module = NDModule(self.module) # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] diff --git a/plugins/module_utils/pydantic_compat.py b/plugins/module_utils/pydantic_compat.py new file mode 100644 index 00000000..f1d90fe3 --- /dev/null +++ b/plugins/module_utils/pydantic_compat.py @@ -0,0 +1,200 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +# pylint: disable=too-few-public-methods +""" +Pydantic compatibility layer. 
+ +This module provides a single location for Pydantic imports with fallback +implementations when Pydantic is not available. This ensures consistent +behavior across all modules and follows the DRY principle. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import traceback +from typing import TYPE_CHECKING, Any, Callable, Union + +if TYPE_CHECKING: + # Type checkers always see the real Pydantic types + from pydantic import ( + AfterValidator, + BaseModel, + BeforeValidator, + ConfigDict, + Field, + PydanticExperimentalWarning, + StrictBool, + ValidationError, + field_serializer, + model_serializer, + field_validator, + model_validator, + validator, + ) +else: + # Runtime: try to import, with fallback + try: + from pydantic import ( + AfterValidator, + BaseModel, + BeforeValidator, + ConfigDict, + Field, + PydanticExperimentalWarning, + StrictBool, + ValidationError, + field_serializer, + model_serializer, + field_validator, + model_validator, + validator, + ) + except ImportError: + HAS_PYDANTIC = False # pylint: disable=invalid-name + PYDANTIC_IMPORT_ERROR: Union[str, None] = traceback.format_exc() # pylint: disable=invalid-name + + # Fallback: Minimal BaseModel replacement + class BaseModel: + """Fallback BaseModel when pydantic is not available.""" + + model_config = {"validate_assignment": False, "use_enum_values": False} + + def __init__(self, **kwargs): + """Accept keyword arguments and set them as attributes.""" + for key, value in kwargs.items(): + setattr(self, key, value) + + def model_dump(self, exclude_none: bool = False, exclude_defaults: bool = False) -> dict: # pylint: disable=unused-argument + """Return a dictionary of field names and values. 
+ + Args: + exclude_none: If True, exclude fields with None values + exclude_defaults: Accepted for API compatibility but not implemented in fallback + """ + result = {} + for key, value in self.__dict__.items(): + if exclude_none and value is None: + continue + result[key] = value + return result + + # Fallback: ConfigDict that does nothing + def ConfigDict(**kwargs) -> dict: # pylint: disable=unused-argument,invalid-name + """Pydantic ConfigDict fallback when pydantic is not available.""" + return kwargs + + # Fallback: Field that does nothing + def Field(**kwargs) -> Any: # pylint: disable=unused-argument,invalid-name + """Pydantic Field fallback when pydantic is not available.""" + if "default_factory" in kwargs: + return kwargs["default_factory"]() + return kwargs.get("default") + + # Fallback: field_serializer decorator that does nothing + def field_serializer(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic field_serializer fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + # Fallback: model_serializer decorator that does nothing + def model_serializer(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic model_serializer fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + # Fallback: field_validator decorator that does nothing + def field_validator(*args, **kwargs) -> Callable[..., Any]: # pylint: disable=unused-argument,invalid-name + """Pydantic field_validator fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + # Fallback: AfterValidator that returns the function unchanged + def AfterValidator(func): # pylint: disable=invalid-name + """Pydantic AfterValidator fallback when pydantic is not available.""" + return func + + # Fallback: BeforeValidator that returns the function unchanged + def BeforeValidator(func): # pylint: disable=invalid-name + """Pydantic 
BeforeValidator fallback when pydantic is not available.""" + return func + + # Fallback: PydanticExperimentalWarning + PydanticExperimentalWarning = Warning + + # Fallback: StrictBool + StrictBool = bool + + # Fallback: ValidationError + class ValidationError(Exception): + """ + Pydantic ValidationError fallback when pydantic is not available. + """ + + def __init__(self, message="A custom error occurred."): + self.message = message + super().__init__(self.message) + + def __str__(self): + return f"ValidationError: {self.message}" + + # Fallback: model_validator decorator that does nothing + def model_validator(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic model_validator fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + # Fallback: validator decorator that does nothing + def validator(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic validator fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + + else: + HAS_PYDANTIC = True # pylint: disable=invalid-name + PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name + +# Set HAS_PYDANTIC for when TYPE_CHECKING is True +if TYPE_CHECKING: + HAS_PYDANTIC = True # pylint: disable=invalid-name + PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name + +__all__ = [ + "AfterValidator", + "BaseModel", + "BeforeValidator", + "ConfigDict", + "Field", + "HAS_PYDANTIC", + "PYDANTIC_IMPORT_ERROR", + "PydanticExperimentalWarning", + "StrictBool", + "ValidationError", + "field_serializer", + "model_serializer", + "field_validator", + "model_validator", + "validator", +] From c1774d1801c99b1138c784524075fece6a94a67f Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 3 Mar 2026 17:19:47 -0500 Subject: [PATCH 047/109] [ignore] Rename NDBaseSmartEndpoint to NDBaseEndpoint. Fix importation issues. 
--- plugins/module_utils/endpoints/base.py | 4 +++- .../endpoints/v1/infra_aaa_local_users.py | 4 ++-- plugins/module_utils/orchestrators/base.py | 14 +++++++------- plugins/module_utils/orchestrators/local_user.py | 14 +++++++------- plugins/modules/nd_local_user.py | 5 ++++- 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/plugins/module_utils/endpoints/base.py b/plugins/module_utils/endpoints/base.py index bfd59ee1..2d214878 100644 --- a/plugins/module_utils/endpoints/base.py +++ b/plugins/module_utils/endpoints/base.py @@ -23,7 +23,7 @@ Field, ) from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.types import IdentifierKey +from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey class NDEndpointBaseModel(BaseModel, ABC): @@ -132,6 +132,8 @@ def verb(self) -> HttpVerbEnum: None """ + # TODO: Maybe to be modifed to be more Pydantic (low priority) + # TODO: Maybe change function's name (low priority) # NOTE: function to set endpoints attribute fields from identifiers -> acts as the bridge between Models and Endpoints for API Request Orchestration @abstractmethod def set_identifiers(self, identifier: IdentifierKey = None): diff --git a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py index 1e1d7823..0008b188 100644 --- a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py +++ b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py @@ -18,12 +18,12 @@ from typing import Literal, Final from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import LoginIdMixin from ansible_collections.cisco.nd.plugins.module_utils.endpoints.enums import VerbEnum -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseSmartEndpoint, NDBasePath +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import 
NDBaseEndpoint, NDBasePath from pydantic import Field from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey -class _V1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseSmartEndpoint): +class _V1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseEndpoint): """ Base class for ND Infra AAA Local Users endpoints. diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 8c84de8e..b0e34b61 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -12,8 +12,8 @@ from typing import ClassVar, Type, Optional from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseSmartEndpoint -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.types import ResponseType +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType # TODO: Revisit naming them "Orchestrator" @@ -28,11 +28,11 @@ class NDBaseOrchestrator(BaseModel): model_class: ClassVar[Type[NDBaseModel]] = Type[NDBaseModel] # NOTE: if not defined by subclasses, return an error as they are required - create_endpoint: Type[NDBaseSmartEndpoint] - update_endpoint: Type[NDBaseSmartEndpoint] - delete_endpoint: Type[NDBaseSmartEndpoint] - query_one_endpoint: Type[NDBaseSmartEndpoint] - query_all_endpoint: Type[NDBaseSmartEndpoint] + create_endpoint: Type[NDBaseEndpoint] + update_endpoint: Type[NDBaseEndpoint] + delete_endpoint: Type[NDBaseEndpoint] + query_one_endpoint: Type[NDBaseEndpoint] + query_all_endpoint: Type[NDBaseEndpoint] # NOTE: Module Field is always required # TODO: Replace it with future sender (low priority) diff --git a/plugins/module_utils/orchestrators/local_user.py 
b/plugins/module_utils/orchestrators/local_user.py index bea4a486..5e52a00b 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -12,8 +12,8 @@ from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseSmartEndpoint -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.types import ResponseType +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra_aaa_local_users import ( V1InfraAaaLocalUsersPost, V1InfraAaaLocalUsersPut, @@ -25,11 +25,11 @@ class LocalUserOrchestrator(NDBaseOrchestrator): model_class: Type[NDBaseModel] = LocalUserModel - create_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersPost - update_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersPut - delete_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersDelete - query_one_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersGet - query_all_endpoint: Type[NDBaseSmartEndpoint] = V1InfraAaaLocalUsersGet + create_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersPost + update_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersPut + delete_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersDelete + query_one_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersGet + query_all_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersGet def query_all(self) -> ResponseType: """ diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index a6972c07..6f296065 100644 
--- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -198,8 +198,11 @@ def main(): ) # Manage state + # TODO: return module output class object: + # output = nd_state_machine.manage_state() + # module.exit_json(**output) nd_state_machine.manage_state() - + nd_state_machine.exit_json() except Exception as e: From 872b5f41ee5d18ce912563909509ac818e01e090 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Wed, 4 Mar 2026 11:12:27 -0500 Subject: [PATCH 048/109] [ignore] Replace all pydantic imports with pydantic_compat. Fix sanity issues. --- plugins/module_utils/constants.py | 4 ++++ .../endpoints/v1/infra_aaa_local_users.py | 2 +- plugins/module_utils/models/base.py | 9 ++++----- plugins/module_utils/models/local_user.py | 19 ++++++++++++------ plugins/module_utils/nd_state_machine.py | 2 +- plugins/module_utils/orchestrators/base.py | 2 +- plugins/module_utils/pydantic_compat.py | 20 ++++++++++++++++++- plugins/modules/nd_local_user.py | 3 ++- 8 files changed, 45 insertions(+), 16 deletions(-) diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index afa0a2b0..563041a0 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -16,6 +16,7 @@ class NDConstantMapping(Dict): def __init__(self, data: Dict): + self.data = data self.new_dict = deepcopy(data) for k, v in data.items(): self.new_dict[v] = k @@ -24,6 +25,9 @@ def __init__(self, data: Dict): def get_dict(self): return self.new_dict + def get_original_data(self): + return list(self.data.keys()) + OBJECT_TYPES = { "tenant": "OST_TENANT", diff --git a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py index 0008b188..d1013e24 100644 --- a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py +++ b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py @@ -19,7 +19,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import 
LoginIdMixin from ansible_collections.cisco.nd.plugins.module_utils.endpoints.enums import VerbEnum from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseEndpoint, NDBasePath -from pydantic import Field +from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import Field from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 8cdcc765..67ce5de0 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -9,9 +9,8 @@ __metaclass__ = type from abc import ABC -from pydantic import BaseModel, ConfigDict +from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import BaseModel, ConfigDict from typing import List, Dict, Any, ClassVar, Tuple, Union, Literal, Optional -from typing_extensions import Self # TODO: Revisit identifiers strategy (low priority) @@ -82,11 +81,11 @@ def to_config(self, **kwargs) -> Dict[str, Any]: return self.model_dump(by_alias=False, exclude_none=True, **kwargs) @classmethod - def from_response(cls, response: Dict[str, Any], **kwargs) -> Self: + def from_response(cls, response: Dict[str, Any], **kwargs) -> "NDBaseModel": return cls.model_validate(response, by_alias=True, **kwargs) @classmethod - def from_config(cls, ansible_config: Dict[str, Any], **kwargs) -> Self: + def from_config(cls, ansible_config: Dict[str, Any], **kwargs) -> "NDBaseModel": return cls.model_validate(ansible_config, by_name=True, **kwargs) # TODO: Revisit this function when revisiting identifier strategy (low priority) @@ -146,7 +145,7 @@ def to_diff_dict(self, **kwargs) -> Dict[str, Any]: # NOTE: initialize and return a deep copy of the instance? # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? # -> similar to NDCOnfigCollection... 
-> add argument to make it optional either replace - def merge(self, other_model: "NDBaseModel", **kwargs) -> Self: + def merge(self, other_model: "NDBaseModel", **kwargs) -> "NDBaseModel": if not isinstance(other_model, type(self)): # TODO: Change error message return TypeError("models are not of the same type.") diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index fe2f2bb5..0575c1be 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -9,7 +9,15 @@ __metaclass__ = type from typing import List, Dict, Any, Optional, ClassVar, Literal -from pydantic import Field, SecretStr, model_serializer, field_serializer, field_validator, model_validator, computed_field +from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import ( + Field, + SecretStr, + model_serializer, + field_serializer, + field_validator, + model_validator, + computed_field, +) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel from ansible_collections.cisco.nd.plugins.module_utils.constants import NDConstantMapping @@ -24,7 +32,7 @@ "approver": "approver", "designer": "designer", } -).get_dict() +) class LocalUserSecurityDomainModel(NDNestedModel): @@ -38,7 +46,7 @@ class LocalUserSecurityDomainModel(NDNestedModel): @model_serializer() def serialize_model(self) -> Dict: - return {self.name: {"roles": [USER_ROLES_MAPPING.get(role, role) for role in (self.roles or [])]}} + return {self.name: {"roles": [USER_ROLES_MAPPING.get_dict().get(role, role) for role in (self.roles or [])]}} # NOTE: Deserialization defined in `LocalUserModel` due to API response complexity @@ -145,7 +153,7 @@ def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: domains_list = [] for domain_name, domain_data in domains_dict.items(): - domains_list.append({"name": domain_name, 
"roles": [USER_ROLES_MAPPING.get(role, role) for role in domain_data.get("roles", [])]}) + domains_list.append({"name": domain_name, "roles": [USER_ROLES_MAPPING.get_dict().get(role, role) for role in domain_data.get("roles", [])]}) return domains_list @@ -174,7 +182,7 @@ def get_argument_spec(cls) -> Dict: elements="dict", options=dict( name=dict(type="str", required=True, aliases=["security_domain_name", "domain_name"]), - roles=dict(type="list", elements="str", choices=list(USER_ROLES_MAPPING)), + roles=dict(type="list", elements="str", choices=USER_ROLES_MAPPING.get_original_data()), ), aliases=["domains"], ), @@ -182,6 +190,5 @@ def get_argument_spec(cls) -> Dict: remote_user_authorization=dict(type="bool"), ), ), - override_exceptions=dict(type="list", elements="str"), state=dict(type="str", default="merged", choices=["merged", "replaced", "overridden", "deleted"]), ) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index e68010fb..81d6a966 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -9,7 +9,7 @@ __metaclass__ = type from typing import Optional, List, Dict, Any, Literal, Type -from pydantic import ValidationError +from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import ValidationError from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index b0e34b61..1a3b1921 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -8,7 +8,7 @@ __metaclass__ = type -from pydantic import BaseModel, ConfigDict +from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import BaseModel, ConfigDict from typing 
import ClassVar, Type, Optional from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule diff --git a/plugins/module_utils/pydantic_compat.py b/plugins/module_utils/pydantic_compat.py index f1d90fe3..e8924cd2 100644 --- a/plugins/module_utils/pydantic_compat.py +++ b/plugins/module_utils/pydantic_compat.py @@ -32,12 +32,14 @@ Field, PydanticExperimentalWarning, StrictBool, + SecretStr, ValidationError, field_serializer, model_serializer, field_validator, model_validator, validator, + computed_field, ) else: # Runtime: try to import, with fallback @@ -50,12 +52,14 @@ Field, PydanticExperimentalWarning, StrictBool, + SecretStr, ValidationError, field_serializer, model_serializer, field_validator, model_validator, validator, + computed_field, ) except ImportError: HAS_PYDANTIC = False # pylint: disable=invalid-name @@ -106,7 +110,7 @@ def decorator(func): return func return decorator - + # Fallback: model_serializer decorator that does nothing def model_serializer(*args, **kwargs): # pylint: disable=unused-argument """Pydantic model_serializer fallback when pydantic is not available.""" @@ -125,6 +129,15 @@ def decorator(func): return decorator + # Fallback: computed_field decorator that does nothing + def computed_field(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic computed_field fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + # Fallback: AfterValidator that returns the function unchanged def AfterValidator(func): # pylint: disable=invalid-name """Pydantic AfterValidator fallback when pydantic is not available.""" @@ -141,6 +154,9 @@ def BeforeValidator(func): # pylint: disable=invalid-name # Fallback: StrictBool StrictBool = bool + # Fallback: SecretStr + SecretStr = str + # Fallback: ValidationError class ValidationError(Exception): """ @@ -191,10 +207,12 @@ def decorator(func): 
"PYDANTIC_IMPORT_ERROR", "PydanticExperimentalWarning", "StrictBool", + "SecretStr", "ValidationError", "field_serializer", "model_serializer", "field_validator", "model_validator", "validator", + "computed_field", ] diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 6f296065..65f2e464 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -27,6 +27,7 @@ - The list of the local users to configure. type: list elements: dict + required: True suboptions: email: description: @@ -202,7 +203,7 @@ def main(): # output = nd_state_machine.manage_state() # module.exit_json(**output) nd_state_machine.manage_state() - + nd_state_machine.exit_json() except Exception as e: From 24c068659bcfba15145ad6d2d86d5d92609b9ce6 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Fri, 6 Mar 2026 13:35:38 -0500 Subject: [PATCH 049/109] [ignore] Add NDOutput class. Modify NDStateMachine and nd_local_user accordingly --- plugins/module_utils/models/base.py | 7 +- plugins/module_utils/models/local_user.py | 3 +- plugins/module_utils/nd_config_collection.py | 3 +- plugins/module_utils/nd_output.py | 70 +++++++ plugins/module_utils/nd_state_machine.py | 186 +++---------------- plugins/module_utils/orchestrators/base.py | 3 - plugins/module_utils/utils.py | 2 +- plugins/modules/nd_local_user.py | 12 +- 8 files changed, 107 insertions(+), 179 deletions(-) create mode 100644 plugins/module_utils/nd_output.py diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 67ce5de0..14c04945 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -143,12 +143,11 @@ def to_diff_dict(self, **kwargs) -> Dict[str, Any]: return self.model_dump(by_alias=True, exclude_none=True, exclude=set(self.exclude_from_diff), **kwargs) # NOTE: initialize and return a deep copy of the instance? - # TODO: Might be missing a proper merge on fields of type `List[NDNestedModel]`? 
- # -> similar to NDCOnfigCollection... -> add argument to make it optional either replace def merge(self, other_model: "NDBaseModel", **kwargs) -> "NDBaseModel": if not isinstance(other_model, type(self)): - # TODO: Change error message - return TypeError("models are not of the same type.") + return TypeError( + f"NDBaseModel.merge method requires models of the same type. self of type {type(self)} and other_model of type {type(other_model)}" + ) for field, value in other_model: if value is None: diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 0575c1be..e2e7faf8 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -71,7 +71,6 @@ class LocalUserModel(NDBaseModel): # Fields # NOTE: `alias` are NOT the ansible aliases. they are the equivalent attribute's names from the API spec - # TODO: use extra for generating argument_spec (low priority) login_id: str = Field(alias="loginID") email: Optional[str] = Field(default=None, alias="email") first_name: Optional[str] = Field(default=None, alias="firstName") @@ -161,7 +160,7 @@ def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: # -- Extra -- - # TODO: to generate from Fields (low priority) + # TODO: to generate from Fields: use extra for generating argument_spec (low priority) @classmethod def get_argument_spec(cls) -> Dict: return dict( diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 5fd9886d..1f751822 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -37,7 +37,6 @@ def __init__(self, model_class: ModelType, items: Optional[List[ModelType]] = No for item in items: self.add(item) - # TODO: might not be necessary def _extract_key(self, item: ModelType) -> IdentifierKey: """ Extract identifier key from item. 
@@ -144,7 +143,7 @@ def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "cha if existing is None: return "new" - # TODO: make a diff class level method for NDBaseModel + # TODO: make a diff class level method for NDBaseModel (high priority) existing_data = existing.to_diff_dict() new_data = new_item.to_diff_dict() is_subset = issubset(new_data, existing_data) diff --git a/plugins/module_utils/nd_output.py b/plugins/module_utils/nd_output.py new file mode 100644 index 00000000..027592df --- /dev/null +++ b/plugins/module_utils/nd_output.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2025, Gaspard Micol (@gmicol) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Dict, Any, Optional, List, Union +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection + + +class NDOutput: + def __init__(self, module: AnsibleModule): + self._output_level: str = module.params.get("output_level", "normal") + self._changed: bool = False + self._before: Union[NDConfigCollection, List] = [] + self._after: Union[NDConfigCollection, List] = [] + self._diff: Union[NDConfigCollection, List] = [] + self._proposed: Union[NDConfigCollection, List] = [] + self._logs: List = [] + self._extra: Dict[str, Any] = {} + + def format(self, **kwargs): + if isinstance(self._before, NDConfigCollection) and isinstance(self._after, NDConfigCollection) and self._before.get_diff_collection(self._after): + self._changed = True + + output = { + "output_level": self._output_level, + "changed": self._changed, + "after": self._after.to_ansible_config() if isinstance(self._after, NDConfigCollection) else self._after, + "before": self._before.to_ansible_config() if isinstance(self._before, NDConfigCollection) else 
self._before, + "diff": self._diff.to_ansible_config() if isinstance(self._diff, NDConfigCollection) else self._diff, + } + + if self._output_level in ("debug", "info"): + output["proposed"] = self._proposed.to_ansible_config() if isinstance(self._proposed, NDConfigCollection) else self._proposed + if self._output_level == "debug": + output["logs"] = "Not yet implemented" + + if self._extra: + output.update(self._extra) + + output.update(**kwargs) + + return output + + def assign( + self, + after: Optional[NDConfigCollection] = None, + before: Optional[NDConfigCollection] = None, + diff: Optional[NDConfigCollection] = None, + proposed: Optional[NDConfigCollection] = None, + logs: Optional[List] = None, + **kwargs + ) -> None: + if isinstance(after, NDConfigCollection): + self._after = after + if isinstance(before, NDConfigCollection): + self._before = before + if isinstance(diff, NDConfigCollection): + self._diff = diff + if isinstance(proposed, NDConfigCollection): + self._proposed = proposed + if isinstance(logs, List): + self._logs = logs + self._extra.update(**kwargs) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 81d6a966..4146926e 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -8,18 +8,15 @@ __metaclass__ = type -from typing import Optional, List, Dict, Any, Literal, Type +from typing import List, Dict, Any, Type from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import ValidationError from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule +from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator -from 
ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey -from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED -# TODO: Revisit StateMachine when there is more arguments than config (e.g., "fabric" and "config" for switches config) -# TODO: Remove inheritence from NDModule (Top Priority) class NDStateMachine: """ Generic Network Resource Module for Nexus Dashboard. @@ -34,29 +31,27 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest self.nd_module = NDModule(self.module) # Operation tracking - self.nd_logs: List[Dict[str, Any]] = [] - self.result: Dict[str, Any] = {"changed": False} + self.output = NDOutput(self.module) # Configuration self.model_orchestrator = model_orchestrator(sender=self.nd_module) self.model_class = self.model_orchestrator.model_class - # TODO: Revisit these class variables when udpating Module intialization and configuration (medium priority) + # TODO: Revisit these class variables when udpating Module intialization and configuration (low priority) self.state = self.module.params["state"] - self.ansible_config = self.module.params.get("config", []) # Initialize collections - # TODO: Revisit class variables `previous`, `existing`, etc... 
(medium priority) self.nd_config_collection = NDConfigCollection[self.model_class] try: - init_all_data = self.model_orchestrator.query_all() - - self.existing = self.nd_config_collection.from_api_response(response_data=init_all_data, model_class=self.model_class) - # Save previous state - self.previous = self.existing.copy() - self.proposed = self.nd_config_collection(model_class=self.model_class) + response_data = self.model_orchestrator.query_all() + # State of configuration objects in ND before change execution + self.before = self.nd_config_collection.from_api_response(response_data=response_data, model_class=self.model_class) + # State of current configuration objects in ND during change execution + self.existing = self.before.copy() + # Ongoing collection of configuration objects that were changed self.sent = self.nd_config_collection(model_class=self.model_class) - - for config in self.ansible_config: + # Collection of configuration objects given by user + self.proposed = self.nd_config_collection(model_class=self.model_class) + for config in self.module.params.get("config", []): try: # Parse config into model item = self.model_class.from_config(config) @@ -64,42 +59,11 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest except ValidationError as e: self.fail_json(msg=f"Invalid configuration: {e}", config=config, validation_errors=e.errors()) return - + self.output.assign(after=self.existing, before=self.before, proposed=self.proposed) except Exception as e: - self.fail_json(msg=f"Initialization failed: {str(e)}", error=str(e)) - - # Logging - # NOTE: format log placeholder - # TODO: use a proper logger (low priority) - def format_log( - self, - identifier: IdentifierKey, - operation_status: Literal["no_change", "created", "updated", "deleted"], - before: Optional[Dict[str, Any]] = None, - after: Optional[Dict[str, Any]] = None, - payload: Optional[Dict[str, Any]] = None, - ) -> None: - """ - Create and append a log entry. 
- """ - log_entry = { - "identifier": identifier, - "operation_status": operation_status, - "before": before, - "after": after, - "payload": payload, - } - - # Add HTTP details if not in check mode - if not self.module.check_mode and self.nd_module.url is not None: - log_entry.update( - {"method": self.nd_module.method, "response": self.nd_module.response, "status": self.nd_module.status, "url": self.nd_module.url} - ) - - self.nd_logs.append(log_entry) + self.fail_json(msg=f"NDStateMachine initialization failed: {str(e)}", error=str(e)) # State Management (core function) - # TODO: adapt all `manage` functions to endpoint/orchestrator strategies (Top priority) def manage_state(self) -> None: """ Manage state according to desired configuration. @@ -114,7 +78,6 @@ def manage_state(self) -> None: elif self.state == "deleted": self._manage_delete_state() - # TODO: not needed with Ansible `argument_spec` validation. Keep it for now but needs to be removed (low priority) # TODO: boil down an Exception instead of using `fail_json` method else: self.fail_json(msg=f"Invalid state: {self.state}") @@ -125,28 +88,19 @@ def _manage_create_update_state(self) -> None: """ for proposed_item in self.proposed: # Extract identifier - response = {} identifier = proposed_item.get_identifier_value() - existing_config = self.existing.get(identifier).to_config() if self.existing.get(identifier) else {} try: # Determine diff status diff_status = self.existing.get_diff_config(proposed_item) # No changes needed if diff_status == "no_diff": - self.format_log( - identifier=identifier, - operation_status="no_change", - before=existing_config, - after=existing_config, - ) continue # Prepare final config based on state if self.state == "merged": # Merge with existing - merged_item = self.existing.merge(proposed_item) - final_item = merged_item + final_item = self.existing.merge(proposed_item) else: # Replace or create if diff_status == "changed": @@ -158,34 +112,18 @@ def 
_manage_create_update_state(self) -> None: # Execute API operation if diff_status == "changed": if not self.module.check_mode: - response = self.model_orchestrator.update(final_item) + self.model_orchestrator.update(final_item) self.sent.add(final_item) - operation_status = "updated" elif diff_status == "new": if not self.module.check_mode: - response = self.model_orchestrator.create(final_item) + self.model_orchestrator.create(final_item) self.sent.add(final_item) - operation_status = "created" # Log operation - self.format_log( - identifier=identifier, - operation_status=operation_status, - before=existing_config, - after=self.model_class.model_validate(response).to_config() if not self.module.check_mode else final_item.to_config(), - payload=final_item.to_payload(), - ) + self.output.assign(after=self.existing) except Exception as e: error_msg = f"Failed to process {identifier}: {e}" - - self.format_log( - identifier=identifier, - operation_status="no_change", - before=existing_config, - after=existing_config, - ) - if not self.module.params.get("ignore_errors", False): self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) return @@ -194,7 +132,7 @@ def _manage_override_deletions(self) -> None: """ Delete items not in proposed config (for overridden state). 
""" - diff_identifiers = self.previous.get_diff_identifiers(self.proposed) + diff_identifiers = self.before.get_diff_identifiers(self.proposed) for identifier in diff_identifiers: try: @@ -204,18 +142,13 @@ def _manage_override_deletions(self) -> None: # Execute delete if not self.module.check_mode: - response = self.model_orchestrator.delete(existing_item) + self.model_orchestrator.delete(existing_item) # Remove from collection self.existing.delete(identifier) # Log deletion - self.format_log( - identifier=identifier, - operation_status="deleted", - before=existing_item.to_config(), - after={}, - ) + self.output.assign(after=self.existing) except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" @@ -232,29 +165,17 @@ def _manage_delete_state(self) -> None: existing_item = self.existing.get(identifier) if not existing_item: - # Already deleted or doesn't exist - self.format_log( - identifier=identifier, - operation_status="no_change", - before={}, - after={}, - ) continue # Execute delete if not self.module.check_mode: - response = self.model_orchestrator.delete(existing_item) + self.model_orchestrator.delete(existing_item) # Remove from collection self.existing.delete(identifier) # Log deletion - self.format_log( - identifier=identifier, - operation_status="deleted", - before=existing_item.to_config(), - after={}, - ) + self.output.assign(after=self.existing) except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" @@ -263,67 +184,10 @@ def _manage_delete_state(self) -> None: self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) return - # Output Formatting - # TODO: move to separate Class (results) -> align it with rest_send PR - # TODO: return a defined ordered list of config (for integration test) - def add_logs_and_outputs(self) -> None: - """Add logs and outputs to module result based on output_level.""" - output_level = self.module.params.get("output_level", "normal") - state = self.module.params.get("state") 
- - # Add previous state for certain states and output levels - if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - if output_level in ("debug", "info"): - self.result["previous"] = self.previous.to_ansible_config() - - # Check if there were changes - if self.previous.get_diff_collection(self.existing): - self.result["changed"] = True - - # Add stdout if present - if self.nd_module.stdout: - self.result["stdout"] = self.nd_module.stdout - - # Add debug information - if output_level == "debug": - self.result["nd_logs"] = self.nd_logs - - if self.nd_module.url is not None: - self.result["httpapi_logs"] = self.nd_module.httpapi_logs - - if state in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - self.result["sent"] = self.sent.to_payload_list() - self.result["proposed"] = self.proposed.to_ansible_config() - - # Always include current state - self.result["current"] = self.existing.to_ansible_config() - # Module Exit Methods def fail_json(self, msg: str, **kwargs) -> None: """ Exit module with failure. """ - self.add_logs_and_outputs() - self.result.update(**kwargs) - self.module.fail_json(msg=msg, **self.result) - - def exit_json(self, **kwargs) -> None: - """ - Exit module successfully. 
- """ - self.add_logs_and_outputs() - - # Add diff if module supports it - if self.module._diff and self.result.get("changed") is True: - try: - # Use diff-safe dicts (excludes sensitive fields) - before = [item.to_diff_dict() for item in self.previous] - after = [item.to_diff_dict() for item in self.existing] - - self.result["diff"] = dict(before=before, after=after) - except Exception: - pass # Don't fail on diff generation - - self.result.update(**kwargs) - self.module.exit_json(**self.result) + self.module.fail_json(msg=msg) diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 1a3b1921..1a8b4f10 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -16,7 +16,6 @@ from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType -# TODO: Revisit naming them "Orchestrator" class NDBaseOrchestrator(BaseModel): model_config = ConfigDict( use_enum_values=True, @@ -40,7 +39,6 @@ class NDBaseOrchestrator(BaseModel): # NOTE: Generic CRUD API operations for simple endpoints with single identifier (e.g. 
"api/v1/infra/aaa/LocalUsers/{loginID}") # TODO: Explore new ways to make them even more general -> e.g., create a general API operation function (low priority) - # TODO: Revisit Deserialization def create(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: try: api_endpoint = self.create_endpoint() @@ -72,7 +70,6 @@ def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: except Exception as e: raise Exception(f"Query failed for {model_instance.get_identifier_value()}: {e}") from e - # TODO: Revisit the straegy around the query_all (see local_user's case) def query_all(self, model_instance: Optional[NDBaseModel] = None, **kwargs) -> ResponseType: try: result = self.sender.query_obj(self.query_all_endpoint.path) diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 0bf7cfc8..e09bd499 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -56,7 +56,7 @@ def issubset(subset: Any, superset: Any) -> bool: return True -# TODO: Might not necessary with Pydantic validation and serialization built-in methods +# TODO: Might not necessary with Pydantic validation and serialization built-in methods (see models/local_user) def remove_unwanted_keys(data: Dict, unwanted_keys: List[Union[str, List[str]]]) -> Dict: """Remove unwanted keys from dict (supports nested paths).""" data = deepcopy(data) diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 65f2e464..d1d871fe 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -128,10 +128,10 @@ reuse_limitation: 20 time_interval_limitation: 10 security_domains: - name: all - roles: - - observer - - support_engineer + - name: all + roles: + - observer + - support_engineer remote_id_claim: remote_user remote_user_authorization: true state: merged @@ -204,10 +204,10 @@ def main(): # module.exit_json(**output) nd_state_machine.manage_state() - nd_state_machine.exit_json() + 
module.exit_json(**nd_state_machine.output.format()) except Exception as e: - module.fail_json(msg=f"Module execution failed: {str(e)}") + module.fail_json(msg=f"Module execution failed: {str(e)}", **nd_state_machine.output.format()) if __name__ == "__main__": From dedc9588a10fddd6ca8753b2ddbb85590a194a63 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 10 Mar 2026 13:36:50 -0400 Subject: [PATCH 050/109] [ignore] Update NDOutput class. Remove all fail_json dependencies in NDStateMachineand add custom Exception for it in common/exceptions dir. Set json mode for to_diff_dict method in NDBaseModel. --- plugins/module_utils/common/exceptions.py | 11 ++++++-- plugins/module_utils/models/base.py | 4 +-- plugins/module_utils/nd_output.py | 7 +++-- plugins/module_utils/nd_state_machine.py | 32 +++++++---------------- 4 files changed, 23 insertions(+), 31 deletions(-) diff --git a/plugins/module_utils/common/exceptions.py b/plugins/module_utils/common/exceptions.py index 16e31ac6..0d7b7bcc 100644 --- a/plugins/module_utils/common/exceptions.py +++ b/plugins/module_utils/common/exceptions.py @@ -1,6 +1,5 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) """ @@ -144,3 +143,11 @@ def to_dict(self) -> dict[str, Any]: - None """ return self.error_data.model_dump(exclude_none=True) + + +class NDStateMachineError(Exception): + """ + Raised when NDStateMachine is failing. + """ + + pass diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 14c04945..30e5de5e 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -72,7 +72,7 @@ def to_payload(self, **kwargs) -> Dict[str, Any]: """ Convert model to API payload format. 
""" - return self.model_dump(by_alias=True, exclude_none=True, **kwargs) + return self.model_dump(by_alias=True, exclude_none=True, mode="json", **kwargs) def to_config(self, **kwargs) -> Dict[str, Any]: """ @@ -140,7 +140,7 @@ def to_diff_dict(self, **kwargs) -> Dict[str, Any]: """ Export for diff comparison (excludes sensitive fields). """ - return self.model_dump(by_alias=True, exclude_none=True, exclude=set(self.exclude_from_diff), **kwargs) + return self.model_dump(by_alias=True, exclude_none=True, exclude=set(self.exclude_from_diff), mode="json", **kwargs) # NOTE: initialize and return a deep copy of the instance? def merge(self, other_model: "NDBaseModel", **kwargs) -> "NDBaseModel": diff --git a/plugins/module_utils/nd_output.py b/plugins/module_utils/nd_output.py index 027592df..dbfc2cd2 100644 --- a/plugins/module_utils/nd_output.py +++ b/plugins/module_utils/nd_output.py @@ -9,13 +9,12 @@ __metaclass__ = type from typing import Dict, Any, Optional, List, Union -from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection class NDOutput: - def __init__(self, module: AnsibleModule): - self._output_level: str = module.params.get("output_level", "normal") + def __init__(self, output_level: str): + self._output_level: str = output_level self._changed: bool = False self._before: Union[NDConfigCollection, List] = [] self._after: Union[NDConfigCollection, List] = [] @@ -24,7 +23,7 @@ def __init__(self, module: AnsibleModule): self._logs: List = [] self._extra: Dict[str, Any] = {} - def format(self, **kwargs): + def format(self, **kwargs) -> Dict[str, Any]: if isinstance(self._before, NDConfigCollection) and isinstance(self._after, NDConfigCollection) and self._before.get_diff_collection(self._after): self._changed = True diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 4146926e..bd86da3c 100644 --- 
a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -8,13 +8,14 @@ __metaclass__ = type -from typing import List, Dict, Any, Type +from typing import Type from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import ValidationError from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.common.exceptions import NDStateMachineError class NDStateMachine: @@ -31,7 +32,7 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest self.nd_module = NDModule(self.module) # Operation tracking - self.output = NDOutput(self.module) + self.output = NDOutput(output_level=module.params.get("output_level", "normal")) # Configuration self.model_orchestrator = model_orchestrator(sender=self.nd_module) @@ -57,11 +58,10 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest item = self.model_class.from_config(config) self.proposed.add(item) except ValidationError as e: - self.fail_json(msg=f"Invalid configuration: {e}", config=config, validation_errors=e.errors()) - return + raise NDStateMachineError(f"Invalid configuration. 
for config {config}: {str(e)}") self.output.assign(after=self.existing, before=self.before, proposed=self.proposed) except Exception as e: - self.fail_json(msg=f"NDStateMachine initialization failed: {str(e)}", error=str(e)) + raise NDStateMachineError(f"Initialization failed: {str(e)}") # State Management (core function) def manage_state(self) -> None: @@ -78,9 +78,8 @@ def manage_state(self) -> None: elif self.state == "deleted": self._manage_delete_state() - # TODO: boil down an Exception instead of using `fail_json` method else: - self.fail_json(msg=f"Invalid state: {self.state}") + raise NDStateMachineError(f"Invalid state: {self.state}") def _manage_create_update_state(self) -> None: """ @@ -125,8 +124,7 @@ def _manage_create_update_state(self) -> None: except Exception as e: error_msg = f"Failed to process {identifier}: {e}" if not self.module.params.get("ignore_errors", False): - self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) - return + raise NDStateMachineError(error_msg) def _manage_override_deletions(self) -> None: """ @@ -152,10 +150,8 @@ def _manage_override_deletions(self) -> None: except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" - if not self.module.params.get("ignore_errors", False): - self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) - return + raise NDStateMachineError(error_msg) def _manage_delete_state(self) -> None: """Handle deleted state.""" @@ -179,15 +175,5 @@ def _manage_delete_state(self) -> None: except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" - if not self.module.params.get("ignore_errors", False): - self.fail_json(msg=error_msg, identifier=str(identifier), error=str(e)) - return - - # Module Exit Methods - - def fail_json(self, msg: str, **kwargs) -> None: - """ - Exit module with failure. 
- """ - self.module.fail_json(msg=msg) + raise NDStateMachineError(error_msg) From 2d472d9c051c235ac9c8584c543519a3966e1dcf Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Wed, 11 Mar 2026 11:48:38 -0400 Subject: [PATCH 051/109] [ignore] Fix serialization of model with minimal changes to base.py and local_user.py. Add method to NDBaseModel and apply relevant changes to nd_config_collection. --- plugins/module_utils/models/base.py | 211 ++++++++++++------- plugins/module_utils/models/local_user.py | 180 ++++++++++------ plugins/module_utils/nd_config_collection.py | 5 +- plugins/module_utils/pydantic_compat.py | 6 + 4 files changed, 256 insertions(+), 146 deletions(-) diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 30e5de5e..79f9ec80 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -9,154 +9,221 @@ __metaclass__ = type from abc import ABC -from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import BaseModel, ConfigDict -from typing import List, Dict, Any, ClassVar, Tuple, Union, Literal, Optional +from pydantic import BaseModel, ConfigDict +from typing import List, Dict, Any, ClassVar, Set, Tuple, Union, Literal, Optional +from ansible_collections.cisco.nd.plugins.module_utils.utils import issubset -# TODO: Revisit identifiers strategy (low priority) -# NOTE: what about List of NestedModels? -> make it a separate Sub Model class NDBaseModel(BaseModel, ABC): """ Base model for all Nexus Dashboard API objects. 
- Supports three identifier strategies: - - single: One unique required field (e.g., ["login_id"]) - - composite: Multiple required fields as tuple (e.g., ["device", "interface"]) - - hierarchical: Priority-ordered fields (e.g., ["uuid", "name"]) - - singleton: no identifiers required (e.g., only a single instance can exist in Nexus Dasboard) + Class-level configuration attributes: + identifiers: List of field names used to uniquely identify this object. + identifier_strategy: How identifiers are interpreted. + exclude_from_diff: Fields excluded from diff comparisons. + unwanted_keys: Keys to strip from API responses before processing. + payload_nested_fields: Mapping of {payload_key: [field_names]} for fields + that should be grouped under a nested key in payload mode but remain + flat in config mode. + payload_exclude_fields: Fields to exclude from payload output + (e.g., because they are restructured into nested keys). + config_exclude_fields: Fields to exclude from config output + (e.g., computed payload-only structures). 
""" - # TODO: revisit initial Model Configurations (low priority) model_config = ConfigDict( str_strip_whitespace=True, use_enum_values=True, validate_assignment=True, populate_by_name=True, arbitrary_types_allowed=True, - extra="allow", # NOTE: enabled extra: allows to add extra Field infos for generating Ansible argument_spec and Module Docs + extra="ignore", ) - # TODO: Revisit identifiers strategy (low priority) + # --- Identifier Configuration --- + identifiers: ClassVar[Optional[List[str]]] = None - # TODO: Revisit no identifiers strategy naming (`singleton` -> `unique`, `unnamed`) (low priority) identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" - # Optional: fields to exclude from diffs (e.g., passwords) - exclude_from_diff: ClassVar[List] = [] - # TODO: To be removed in the future (see local_user model) + # --- Serialization Configuration --- + + exclude_from_diff: ClassVar[Set[str]] = set() unwanted_keys: ClassVar[List] = [] - # TODO: Revisit it with identifiers strategy (low priority) + # Declarative nested-field grouping for payload mode + # e.g., {"passwordPolicy": ["reuse_limitation", "time_interval_limitation"]} + # means: in payload mode, remove these fields from top level and nest them + # under "passwordPolicy" with their alias names. + payload_nested_fields: ClassVar[Dict[str, List[str]]] = {} + + # Fields to explicitly exclude per mode + payload_exclude_fields: ClassVar[Set[str]] = set() + config_exclude_fields: ClassVar[Set[str]] = set() + + # --- Subclass Validation --- + def __init_subclass__(cls, **kwargs): - """ - Enforce configuration for identifiers definition. 
- """ super().__init_subclass__(**kwargs) # Skip enforcement for nested models - if cls.__name__ in ["NDNestedModel"] or any(base.__name__ == "NDNestedModel" for base in cls.__mro__): + if cls.__name__ == "NDNestedModel" or any(base.__name__ == "NDNestedModel" for base in cls.__mro__): return if not hasattr(cls, "identifiers") or cls.identifiers is None: - raise ValueError( - f"Class {cls.__name__} must define 'identifiers' and 'identifier_strategy'." - f"Example: `identifiers: ClassVar[Optional[List[str]]] = ['login_id']`" - ) + raise ValueError(f"Class {cls.__name__} must define 'identifiers'. " f"Example: identifiers: ClassVar[Optional[List[str]]] = ['login_id']") if not hasattr(cls, "identifier_strategy") or cls.identifier_strategy is None: - raise ValueError( - f"Class {cls.__name__} must define 'identifiers' and 'identifier_strategy'." - f"Example: `identifier_strategy: ClassVar[Optional[Literal['single', 'composite', 'hierarchical', 'singleton']]] = 'single'`" - ) + raise ValueError(f"Class {cls.__name__} must define 'identifier_strategy'. " f"Example: identifier_strategy: ClassVar[...] = 'single'") - def to_payload(self, **kwargs) -> Dict[str, Any]: + # --- Core Serialization --- + + def _build_payload_nested(self, data: Dict[str, Any]) -> Dict[str, Any]: """ - Convert model to API payload format. + Apply payload_nested_fields: pull specified fields out of the top-level + dict and group them under their declared parent key. 
""" - return self.model_dump(by_alias=True, exclude_none=True, mode="json", **kwargs) + if not self.payload_nested_fields: + return data + + result = dict(data) + + for nested_key, field_names in self.payload_nested_fields.items(): + nested_dict = {} + for field_name in field_names: + # Resolve the alias for this field + field_info = self.__class__.model_fields.get(field_name) + if field_info is None: + continue + + alias = field_info.alias or field_name + + # Pull value from the serialized data (which uses aliases in payload mode) + if alias in result: + nested_dict[alias] = result.pop(alias) + + if nested_dict: + result[nested_key] = nested_dict + + return result + + def to_payload(self, **kwargs) -> Dict[str, Any]: + """Convert model to API payload format (aliased keys, nested structures).""" + data = self.model_dump( + by_alias=True, + exclude_none=True, + mode="json", + context={"mode": "payload"}, + exclude=self.payload_exclude_fields or None, + **kwargs, + ) + return self._build_payload_nested(data) def to_config(self, **kwargs) -> Dict[str, Any]: - """ - Convert model to Ansible config format. 
- """ - return self.model_dump(by_alias=False, exclude_none=True, **kwargs) + """Convert model to Ansible config format (Python field names, flat structure).""" + return self.model_dump( + by_alias=False, + exclude_none=True, + context={"mode": "config"}, + exclude=self.config_exclude_fields or None, + **kwargs, + ) + + # --- Core Deserialization --- @classmethod def from_response(cls, response: Dict[str, Any], **kwargs) -> "NDBaseModel": + """Create model instance from API response dict.""" return cls.model_validate(response, by_alias=True, **kwargs) @classmethod def from_config(cls, ansible_config: Dict[str, Any], **kwargs) -> "NDBaseModel": + """Create model instance from Ansible config dict.""" return cls.model_validate(ansible_config, by_name=True, **kwargs) - # TODO: Revisit this function when revisiting identifier strategy (low priority) - def get_identifier_value(self, **kwargs) -> Union[str, int, Tuple[Any, ...]]: + # --- Identifier Access --- + + def get_identifier_value(self) -> Optional[Union[str, int, Tuple[Any, ...]]]: """ - Extract identifier value(s) from this instance: - - single identifier: Returns field value. - - composite identifiers: Returns tuple of all field values. - - hierarchical identifiers: Returns tuple of (field_name, value) for first non-None field. + Extract identifier value(s) based on the configured strategy. 
+ + Returns: + - single: The field value + - composite: Tuple of all field values + - hierarchical: Tuple of (field_name, value) for first non-None field + - singleton: None """ - if not self.identifiers and self.identifier_strategy != "singleton": - raise ValueError(f"{self.__class__.__name__} must have identifiers defined with its current identifier strategy: `{self.identifier_strategy}`") + strategy = self.identifier_strategy - if self.identifier_strategy == "single": + if strategy == "singleton": + return None + + if not self.identifiers: + raise ValueError(f"{self.__class__.__name__} has strategy '{strategy}' but no identifiers defined.") + + if strategy == "single": value = getattr(self, self.identifiers[0], None) if value is None: raise ValueError(f"Single identifier field '{self.identifiers[0]}' is None") return value - elif self.identifier_strategy == "composite": + elif strategy == "composite": values = [] missing = [] - for field in self.identifiers: value = getattr(self, field, None) if value is None: missing.append(field) values.append(value) - - # NOTE: might be redefined with Pydantic (low priority) if missing: raise ValueError(f"Composite identifier fields {missing} are None. 
" f"All required: {self.identifiers}") - return tuple(values) - elif self.identifier_strategy == "hierarchical": + elif strategy == "hierarchical": for field in self.identifiers: value = getattr(self, field, None) if value is not None: return (field, value) - raise ValueError(f"No non-None value in hierarchical fields {self.identifiers}") - # TODO: Revisit condition when there is no identifiers (low priority) - elif self.identifier_strategy == "singleton": - return None - else: - raise ValueError(f"Unknown identifier strategy: {self.identifier_strategy}") + raise ValueError(f"Unknown identifier strategy: {strategy}") + + # --- Diff & Merge --- def to_diff_dict(self, **kwargs) -> Dict[str, Any]: + """Export for diff comparison, excluding sensitive fields.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude=self.exclude_from_diff or None, + mode="json", + **kwargs, + ) + + def get_diff(self, other: "NDBaseModel") -> bool: + """Diff comparison.""" + self_data = self.to_diff_dict() + other_data = other.to_diff_dict() + return issubset(other_data, self_data) + + def merge(self, other: "NDBaseModel") -> "NDBaseModel": """ - Export for diff comparison (excludes sensitive fields). - """ - return self.model_dump(by_alias=True, exclude_none=True, exclude=set(self.exclude_from_diff), mode="json", **kwargs) + Merge another model's non-None values into this instance. + Recursively merges nested NDBaseModel fields. - # NOTE: initialize and return a deep copy of the instance? - def merge(self, other_model: "NDBaseModel", **kwargs) -> "NDBaseModel": - if not isinstance(other_model, type(self)): - return TypeError( - f"NDBaseModel.merge method requires models of the same type. self of type {type(self)} and other_model of type {type(other_model)}" - ) + Returns self for chaining. + """ + if not isinstance(other, type(self)): + raise TypeError(f"Cannot merge {type(other).__name__} into {type(self).__name__}. 
" f"Both must be the same type.") - for field, value in other_model: + for field_name, value in other: if value is None: continue - current_value = getattr(self, field) - if isinstance(current_value, NDBaseModel) and isinstance(value, NDBaseModel): - setattr(self, field, current_value.merge(value)) - + current = getattr(self, field_name) + if isinstance(current, NDBaseModel) and isinstance(value, NDBaseModel): + current.merge(value) else: - setattr(self, field, value) + setattr(self, field_name, value) + return self diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index e2e7faf8..0320d3c1 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -16,13 +16,14 @@ field_serializer, field_validator, model_validator, - computed_field, + FieldSerializationInfo, + SerializationInfo, ) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel from ansible_collections.cisco.nd.plugins.module_utils.constants import NDConstantMapping -# Constant defined here as it is only used in this model + USER_ROLES_MAPPING = NDConstantMapping( { "fabric_admin": "fabric-admin", @@ -36,131 +37,155 @@ class LocalUserSecurityDomainModel(NDNestedModel): - """Security domain configuration for local user (nested model).""" + """ + Security domain with assigned roles for a local user. 
- # Fields - name: str = Field(alias="name", exclude=True) - roles: Optional[List[str]] = Field(default=None, alias="roles", exclude=True) + Canonical form (config): {"name": "all", "roles": ["observer", "support_engineer"]} + API payload form: {"all": {"roles": ["observer", "support-engineer"]}} + """ - # -- Serialization (Model instance -> API payload) -- + name: str = Field(alias="name") + roles: Optional[List[str]] = Field(default=None, alias="roles") @model_serializer() - def serialize_model(self) -> Dict: - return {self.name: {"roles": [USER_ROLES_MAPPING.get_dict().get(role, role) for role in (self.roles or [])]}} + def serialize(self, info: SerializationInfo) -> Any: + mode = (info.context or {}).get("mode", "payload") - # NOTE: Deserialization defined in `LocalUserModel` due to API response complexity + if mode == "config": + result = {"name": self.name} + if self.roles is not None: + result["roles"] = list(self.roles) + return result + + # Payload mode: nested dict with API role names + api_roles = [USER_ROLES_MAPPING.get_dict().get(role, role) for role in (self.roles or [])] + return {self.name: {"roles": api_roles}} -# TODO: Add field validation (e.g. me, le, choices, etc...) (low priority) class LocalUserModel(NDBaseModel): """ - Local user configuration. + Local user configuration for Nexus Dashboard. + + Identifier: login_id (single) - Identifier: login_id (single field) + Serialization notes: + - In payload mode, `reuse_limitation` and `time_interval_limitation` + are nested under `passwordPolicy` (handled by base class via + `payload_nested_fields`). + - In config mode, they remain as flat top-level fields. + - `security_domains` serializes as a nested dict in payload mode + and a flat list of dicts in config mode. 
""" - # Identifier configuration - # TODO: Revisit this identifiers strategy (low priority) + # --- Identifier Configuration --- + identifiers: ClassVar[Optional[List[str]]] = ["login_id"] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" - # Keys management configurations - # TODO: Revisit these configurations (low priority) - exclude_from_diff: ClassVar[List[str]] = ["user_password"] - unwanted_keys: ClassVar[List] = [["passwordPolicy", "passwordChangeTime"], ["userID"]] # Nested path # Simple key + # --- Serialization Configuration --- + + exclude_from_diff: ClassVar[set] = {"user_password"} + unwanted_keys: ClassVar[List] = [ + ["passwordPolicy", "passwordChangeTime"], + ["userID"], + ] + + # In payload mode, nest these fields under "passwordPolicy" + payload_nested_fields: ClassVar[Dict[str, List[str]]] = { + "passwordPolicy": ["reuse_limitation", "time_interval_limitation"], + } + + # --- Fields --- - # Fields - # NOTE: `alias` are NOT the ansible aliases. 
they are the equivalent attribute's names from the API spec login_id: str = Field(alias="loginID") email: Optional[str] = Field(default=None, alias="email") first_name: Optional[str] = Field(default=None, alias="firstName") last_name: Optional[str] = Field(default=None, alias="lastName") user_password: Optional[SecretStr] = Field(default=None, alias="password") - reuse_limitation: Optional[int] = Field(default=None, alias="reuseLimitation", exclude=True) - time_interval_limitation: Optional[int] = Field(default=None, alias="timeIntervalLimitation", exclude=True) + reuse_limitation: Optional[int] = Field(default=None, alias="reuseLimitation") + time_interval_limitation: Optional[int] = Field(default=None, alias="timeIntervalLimitation") security_domains: Optional[List[LocalUserSecurityDomainModel]] = Field(default=None, alias="rbac") remote_id_claim: Optional[str] = Field(default=None, alias="remoteIDClaim") remote_user_authorization: Optional[bool] = Field(default=None, alias="xLaunch") - # -- Serialization (Model instance -> API payload) -- - - @computed_field(alias="passwordPolicy") - @property - def password_policy(self) -> Optional[Dict[str, int]]: - """Computed nested structure for API payload.""" - if self.reuse_limitation is None and self.time_interval_limitation is None: - return None - - policy = {} - if self.reuse_limitation is not None: - policy["reuseLimitation"] = self.reuse_limitation - if self.time_interval_limitation is not None: - policy["timeIntervalLimitation"] = self.time_interval_limitation - return policy + # --- Serializers --- @field_serializer("user_password") def serialize_password(self, value: Optional[SecretStr]) -> Optional[str]: return value.get_secret_value() if value else None @field_serializer("security_domains") - def serialize_domains(self, value: Optional[List[LocalUserSecurityDomainModel]]) -> Optional[Dict]: - # NOTE: exclude `None` values and empty list (-> should we exclude empty list?) 
+ def serialize_security_domains( + self, + value: Optional[List[LocalUserSecurityDomainModel]], + info: FieldSerializationInfo, + ) -> Any: if not value: return None + mode = (info.context or {}).get("mode", "payload") + + if mode == "config": + return [domain.model_dump(context=info.context) for domain in value] + + # Payload mode: merge all domain dicts into {"domains": {...}} domains_dict = {} for domain in value: - domains_dict.update(domain.to_payload()) - + domains_dict.update(domain.model_dump(context=info.context)) return {"domains": domains_dict} - # -- Deserialization (API response / Ansible payload -> Model instance) -- + # --- Validators (Deserialization) --- @model_validator(mode="before") @classmethod - def deserialize_password_policy(cls, data: Any) -> Any: + def flatten_password_policy(cls, data: Any) -> Any: + """ + Flatten nested passwordPolicy from API response into top-level fields. + This is the inverse of the payload_nested_fields nesting. + """ if not isinstance(data, dict): return data - password_policy = data.get("passwordPolicy") - - if password_policy and isinstance(password_policy, dict): - if "reuseLimitation" in password_policy: - data["reuse_limitation"] = password_policy["reuseLimitation"] - if "timeIntervalLimitation" in password_policy: - data["time_interval_limitation"] = password_policy["timeIntervalLimitation"] - - # Remove the nested structure from data to avoid conflicts - # (since it's a computed field, not a real field) - data.pop("passwordPolicy", None) + policy = data.pop("passwordPolicy", None) + if isinstance(policy, dict): + if "reuseLimitation" in policy: + data.setdefault("reuseLimitation", policy["reuseLimitation"]) + if "timeIntervalLimitation" in policy: + data.setdefault("timeIntervalLimitation", policy["timeIntervalLimitation"]) return data @field_validator("security_domains", mode="before") @classmethod - def deserialize_domains(cls, value: Any) -> Optional[List[Dict]]: + def normalize_security_domains(cls, 
value: Any) -> Optional[List[Dict]]: + """ + Accept security_domains in either format: + - List of dicts (Ansible config): [{"name": "all", "roles": [...]}] + - Nested dict (API response): {"domains": {"all": {"roles": [...]}}} + Always normalizes to the list-of-dicts form for model storage. + """ if value is None: return None - # If already in list format (Ansible module representation), return as-is + # Already normalized (from Ansible config) if isinstance(value, list): return value - # If in the nested dict format (API representation) + # API response format if isinstance(value, dict) and "domains" in value: - domains_dict = value["domains"] - domains_list = [] - - for domain_name, domain_data in domains_dict.items(): - domains_list.append({"name": domain_name, "roles": [USER_ROLES_MAPPING.get_dict().get(role, role) for role in domain_data.get("roles", [])]}) - - return domains_list + reverse_mapping = {v: k for k, v in USER_ROLES_MAPPING.get_dict().items()} + return [ + { + "name": domain_name, + "roles": [reverse_mapping.get(role, role) for role in domain_data.get("roles", [])], + } + for domain_name, domain_data in value["domains"].items() + ] return value - # -- Extra -- + # --- Argument Spec --- - # TODO: to generate from Fields: use extra for generating argument_spec (low priority) @classmethod def get_argument_spec(cls) -> Dict: return dict( @@ -180,8 +205,19 @@ def get_argument_spec(cls) -> Dict: type="list", elements="dict", options=dict( - name=dict(type="str", required=True, aliases=["security_domain_name", "domain_name"]), - roles=dict(type="list", elements="str", choices=USER_ROLES_MAPPING.get_original_data()), + name=dict( + type="str", + required=True, + aliases=[ + "security_domain_name", + "domain_name", + ], + ), + roles=dict( + type="list", + elements="str", + choices=USER_ROLES_MAPPING.get_original_data(), + ), ), aliases=["domains"], ), @@ -189,5 +225,9 @@ def get_argument_spec(cls) -> Dict: remote_user_authorization=dict(type="bool"), ), 
), - state=dict(type="str", default="merged", choices=["merged", "replaced", "overridden", "deleted"]), + state=dict( + type="str", + default="merged", + choices=["merged", "replaced", "overridden", "deleted"], + ), ) diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 1f751822..364b8a8f 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -143,10 +143,7 @@ def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "cha if existing is None: return "new" - # TODO: make a diff class level method for NDBaseModel (high priority) - existing_data = existing.to_diff_dict() - new_data = new_item.to_diff_dict() - is_subset = issubset(new_data, existing_data) + is_subset = existing.get_diff(new_item) return "no_diff" if is_subset else "changed" diff --git a/plugins/module_utils/pydantic_compat.py b/plugins/module_utils/pydantic_compat.py index e8924cd2..4456018a 100644 --- a/plugins/module_utils/pydantic_compat.py +++ b/plugins/module_utils/pydantic_compat.py @@ -40,6 +40,8 @@ model_validator, validator, computed_field, + FieldSerializationInfo, + SerializationInfo, ) else: # Runtime: try to import, with fallback @@ -60,6 +62,8 @@ model_validator, validator, computed_field, + FieldSerializationInfo, + SerializationInfo, ) except ImportError: HAS_PYDANTIC = False # pylint: disable=invalid-name @@ -215,4 +219,6 @@ def decorator(func): "model_validator", "validator", "computed_field", + "FieldSerializationInfo", + "SerializationInfo", ] From de81d560c591d3bd2f8802708345495ea6b79872 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Wed, 11 Mar 2026 13:56:54 -0400 Subject: [PATCH 052/109] [ignore] Complete nd_local_user integration test for creation and update asserts. 
--- .../targets/nd_local_user/tasks/main.yml | 296 +++++++++++++++++- 1 file changed, 288 insertions(+), 8 deletions(-) diff --git a/tests/integration/targets/nd_local_user/tasks/main.yml b/tests/integration/targets/nd_local_user/tasks/main.yml index 77e55cd1..de8ad5ed 100644 --- a/tests/integration/targets/nd_local_user/tasks/main.yml +++ b/tests/integration/targets/nd_local_user/tasks/main.yml @@ -46,15 +46,125 @@ - name: all state: merged check_mode: true - register: cm_create_local_user + register: cm_create_local_users - name: Create local users with full and minimum configuration (normal mode) cisco.nd.nd_local_user: <<: *create_local_user - register: nm_create_local_user + register: nm_create_local_users + +- name: Asserts for local users creation tasks + ansible.builtin.assert: + that: + - cm_create_local_users is changed + - cm_create_local_users.after | length == 3 + - cm_create_local_users.after.0.login_id == "admin" + - cm_create_local_users.after.0.first_name == "admin" + - cm_create_local_users.after.0.remote_user_authorization == false + - cm_create_local_users.after.0.reuse_limitation == 0 + - cm_create_local_users.after.0.security_domains | length == 1 + - cm_create_local_users.after.0.security_domains.0.name == "all" + - cm_create_local_users.after.0.security_domains.0.roles | length == 1 + - cm_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - cm_create_local_users.after.0.time_interval_limitation == 0 + - cm_create_local_users.after.1.email == "ansibleuser@example.com" + - cm_create_local_users.after.1.first_name == "Ansible first name" + - cm_create_local_users.after.1.last_name == "Ansible last name" + - cm_create_local_users.after.1.login_id == "ansible_local_user" + - cm_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - cm_create_local_users.after.1.remote_user_authorization == true + - cm_create_local_users.after.1.reuse_limitation == 20 + - cm_create_local_users.after.1.security_domains | 
length == 1 + - cm_create_local_users.after.1.security_domains.0.name == "all" + - cm_create_local_users.after.1.security_domains.0.roles | length == 2 + - cm_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - cm_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - cm_create_local_users.after.1.time_interval_limitation == 10 + - cm_create_local_users.after.2.login_id == "ansible_local_user_2" + - cm_create_local_users.after.2.security_domains | length == 1 + - cm_create_local_users.after.2.security_domains.0.name == "all" + - cm_create_local_users.before | length == 1 + - cm_create_local_users.before.0.login_id == "admin" + - cm_create_local_users.before.0.first_name == "admin" + - cm_create_local_users.before.0.remote_user_authorization == false + - cm_create_local_users.before.0.reuse_limitation == 0 + - cm_create_local_users.before.0.security_domains | length == 1 + - cm_create_local_users.before.0.security_domains.0.name == "all" + - cm_create_local_users.before.0.security_domains.0.roles | length == 1 + - cm_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - cm_create_local_users.before.0.time_interval_limitation == 0 + - cm_create_local_users.diff == [] + - cm_create_local_users.proposed.0.email == "ansibleuser@example.com" + - cm_create_local_users.proposed.0.first_name == "Ansible first name" + - cm_create_local_users.proposed.0.last_name == "Ansible last name" + - cm_create_local_users.proposed.0.login_id == "ansible_local_user" + - cm_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - cm_create_local_users.proposed.0.remote_user_authorization == true + - cm_create_local_users.proposed.0.reuse_limitation == 20 + - cm_create_local_users.proposed.0.security_domains | length == 1 + - cm_create_local_users.proposed.0.security_domains.0.name == "all" + - cm_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - 
cm_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - cm_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - cm_create_local_users.proposed.0.time_interval_limitation == 10 + - cm_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - cm_create_local_users.proposed.1.security_domains | length == 1 + - cm_create_local_users.proposed.1.security_domains.0.name == "all" + - nm_create_local_users is changed + - nm_create_local_users.after.0.first_name == "admin" + - nm_create_local_users.after.0.remote_user_authorization == false + - nm_create_local_users.after.0.reuse_limitation == 0 + - nm_create_local_users.after.0.security_domains | length == 1 + - nm_create_local_users.after.0.security_domains.0.name == "all" + - nm_create_local_users.after.0.security_domains.0.roles | length == 1 + - nm_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - nm_create_local_users.after.0.time_interval_limitation == 0 + - nm_create_local_users.after.1.email == "ansibleuser@example.com" + - nm_create_local_users.after.1.first_name == "Ansible first name" + - nm_create_local_users.after.1.last_name == "Ansible last name" + - nm_create_local_users.after.1.login_id == "ansible_local_user" + - nm_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - nm_create_local_users.after.1.remote_user_authorization == true + - nm_create_local_users.after.1.reuse_limitation == 20 + - nm_create_local_users.after.1.security_domains | length == 1 + - nm_create_local_users.after.1.security_domains.0.name == "all" + - nm_create_local_users.after.1.security_domains.0.roles | length == 2 + - nm_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - nm_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - nm_create_local_users.after.1.time_interval_limitation == 10 + - nm_create_local_users.after.2.login_id == "ansible_local_user_2" + - 
nm_create_local_users.after.2.security_domains | length == 1 + - nm_create_local_users.after.2.security_domains.0.name == "all" + - nm_create_local_users.before | length == 1 + - nm_create_local_users.before.0.login_id == "admin" + - nm_create_local_users.before.0.first_name == "admin" + - nm_create_local_users.before.0.remote_user_authorization == false + - nm_create_local_users.before.0.reuse_limitation == 0 + - nm_create_local_users.before.0.security_domains | length == 1 + - nm_create_local_users.before.0.security_domains.0.name == "all" + - nm_create_local_users.before.0.security_domains.0.roles | length == 1 + - nm_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - nm_create_local_users.before.0.time_interval_limitation == 0 + - nm_create_local_users.diff == [] + - nm_create_local_users.proposed.0.email == "ansibleuser@example.com" + - nm_create_local_users.proposed.0.first_name == "Ansible first name" + - nm_create_local_users.proposed.0.last_name == "Ansible last name" + - nm_create_local_users.proposed.0.login_id == "ansible_local_user" + - nm_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - nm_create_local_users.proposed.0.remote_user_authorization == true + - nm_create_local_users.proposed.0.reuse_limitation == 20 + - nm_create_local_users.proposed.0.security_domains | length == 1 + - nm_create_local_users.proposed.0.security_domains.0.name == "all" + - nm_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - nm_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - nm_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - nm_create_local_users.proposed.0.time_interval_limitation == 10 + - nm_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - nm_create_local_users.proposed.1.security_domains | length == 1 + - nm_create_local_users.proposed.1.security_domains.0.name == "all" # UPDATE -- name: Update all 
ansible_local_user's attributes (check mode) +- name: Replace all ansible_local_user's attributes (check mode) cisco.nd.nd_local_user: &update_first_local_user <<: *nd_info config: @@ -72,12 +182,12 @@ remote_user_authorization: false state: replaced check_mode: true - register: cm_update_local_user + register: cm_replace_local_user -- name: Update local user (normal mode) +- name: Replace all ansible_local_user's attributes (normal mode) cisco.nd.nd_local_user: <<: *update_first_local_user - register: nm_update_local_user + register: nm_replace_local_user - name: Update all ansible_local_user_2's attributes except password cisco.nd.nd_local_user: &update_second_local_user @@ -95,12 +205,178 @@ remote_id_claim: ansible_remote_user_2 remote_user_authorization: true state: merged - register: nm_update_local_user_2 + register: nm_merge_local_user_2 - name: Update all ansible_local_user_2's attributes except password again (idempotency) cisco.nd.nd_local_user: <<: *update_second_local_user - register: nm_update_local_user_2_again + register: nm_merge_local_user_2_again + + +- name: Override local users with minimum configuration + cisco.nd.nd_local_user: + <<: *nd_info + config: + - login_id: admin + first_name: admin + remote_user_authorization: false + reuse_limitation: 0 + time_interval_limitation: 0 + security_domains: + - name: all + roles: + - super_admin + - email: overrideansibleuser@example.com + login_id: ansible_local_user + first_name: Overridden Ansible first name + last_name: Overriden Ansible last name + user_password: overideansibleLocalUserPassword1% + reuse_limitation: 15 + time_interval_limitation: 5 + security_domains: + - name: all + roles: + - observer + remote_id_claim: ansible_remote_user + remote_user_authorization: true + - login_id: ansible_local_user_3 + user_password: ansibleLocalUser3Password1%Test + security_domains: + - name: all + state: overridden + register: nm_override_local_users + +- name: Asserts for local users update tasks + 
ansible.builtin.assert: + that: + - cm_replace_local_user is changed + - cm_replace_local_user.after | length == 3 + - cm_replace_local_user.after.0.login_id == "ansible_local_user_2" + - cm_replace_local_user.after.0.security_domains | length == 1 + - cm_replace_local_user.after.0.security_domains.0.name == "all" + - cm_replace_local_user.after.1.email == "updatedansibleuser@example.com"" + - cm_replace_local_user.after.1.first_name == "Updated Ansible first name" + - cm_replace_local_user.after.1.last_name == "Updated Ansible last name" + - cm_replace_local_user.after.1.login_id == "ansible_local_user" + - cm_replace_local_user.after.1.remote_id_claim == "" + - cm_replace_local_user.after.1.remote_user_authorization == false + - cm_replace_local_user.after.1.reuse_limitation == 25 + - cm_replace_local_user.after.1.security_domains | length == 1 + - cm_replace_local_user.after.1.security_domains.0.name == "all" + - cm_replace_local_user.after.1.security_domains.0.roles | length == 1 + - cm_replace_local_user.after.1.security_domains.0.roles.0 == "super_admin" + - cm_replace_local_user.after.1.time_interval_limitation == 15 + - cm_replace_local_user.after.2.login_id == "admin" + - cm_replace_local_user.after.2.first_name == "admin" + - cm_replace_local_user.after.2.remote_user_authorization == false + - cm_replace_local_user.after.2.reuse_limitation == 0 + - cm_replace_local_user.after.2.security_domains | length == 1 + - cm_replace_local_user.after.2.security_domains.0.name == "all" + - cm_replace_local_user.after.2.security_domains.0.roles | length == 1 + - cm_replace_local_user.after.2.security_domains.0.roles.0 == "super_admin" + - cm_replace_local_user.after.2.time_interval_limitation == 0 + - cm_replace_local_user.before | length == 3 + - cm_replace_local_user.before.2.first_name == "admin" + - cm_replace_local_user.before.2.remote_user_authorization == false + - cm_replace_local_user.before.2.reuse_limitation == 0 + - 
cm_replace_local_user.before.2.security_domains | length == 1 + - cm_replace_local_user.before.2.security_domains.0.name == "all" + - cm_replace_local_user.before.2.security_domains.0.roles | length == 1 + - cm_replace_local_user.before.2.security_domains.0.roles.0 == "super_admin" + - cm_replace_local_user.before.2.time_interval_limitation == 0 + - cm_replace_local_user.before.1.email == "ansibleuser@example.com" + - cm_replace_local_user.before.1.first_name == "Ansible first name" + - cm_replace_local_user.before.1.last_name == "Ansible last name" + - cm_replace_local_user.before.1.login_id == "ansible_local_user" + - cm_replace_local_user.before.1.remote_id_claim == "ansible_remote_user" + - cm_replace_local_user.before.1.remote_user_authorization == true + - cm_replace_local_user.before.1.reuse_limitation == 20 + - cm_replace_local_user.before.1.security_domains | length == 1 + - cm_replace_local_user.before.1.security_domains.0.name == "all" + - cm_replace_local_user.before.1.security_domains.0.roles | length == 2 + - cm_replace_local_user.before.1.security_domains.0.roles.0 == "observer" + - cm_replace_local_user.before.1.security_domains.0.roles.1 == "support_engineer" + - cm_replace_local_user.before.1.time_interval_limitation == 10 + - cm_replace_local_user.before.0.login_id == "ansible_local_user_2" + - cm_replace_local_user.before.0.security_domains | length == 1 + - cm_replace_local_user.before.0.security_domains.0.name == "all" + - cm_replace_local_user.diff == [] + - cm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com"" + - cm_replace_local_user.proposed.0.first_name == "Updated Ansible first name" + - cm_replace_local_user.proposed.0.last_name == "Updated Ansible last name" + - cm_replace_local_user.proposed.0.login_id == "ansible_local_user" + - cm_replace_local_user.proposed.0.remote_id_claim == "" + - cm_replace_local_user.proposed.0.remote_user_authorization == false + - cm_replace_local_user.proposed.0.reuse_limitation == 
25 + - cm_replace_local_user.proposed.0.security_domains | length == 1 + - cm_replace_local_user.proposed.0.security_domains.0.name == "all" + - cm_replace_local_user.proposed.0.security_domains.0.roles | length == 1 + - cm_replace_local_user.proposed.0.security_domains.0.roles.0 == "super_admin" + - cm_replace_local_user.proposed.0.time_interval_limitation == 15 + - nm_replace_local_user is changed + - nm_replace_local_user.after | length == 3 + - nm_replace_local_user.after.0.login_id == "ansible_local_user_2" + - nm_replace_local_user.after.0.security_domains | length == 1 + - nm_replace_local_user.after.0.security_domains.0.name == "all" + - nm_replace_local_user.after.1.email == "updatedansibleuser@example.com"" + - nm_replace_local_user.after.1.first_name == "Updated Ansible first name" + - nm_replace_local_user.after.1.last_name == "Updated Ansible last name" + - nm_replace_local_user.after.1.login_id == "ansible_local_user" + - nm_replace_local_user.after.1.remote_id_claim == "" + - nm_replace_local_user.after.1.remote_user_authorization == false + - nm_replace_local_user.after.1.reuse_limitation == 25 + - nm_replace_local_user.after.1.security_domains | length == 1 + - nm_replace_local_user.after.1.security_domains.0.name == "all" + - nm_replace_local_user.after.1.security_domains.0.roles | length == 1 + - nm_replace_local_user.after.1.security_domains.0.roles.0 == "super_admin" + - nm_replace_local_user.after.1.time_interval_limitation == 15 + - nm_replace_local_user.after.2.login_id == "admin" + - nm_replace_local_user.after.2.first_name == "admin" + - nm_replace_local_user.after.2.remote_user_authorization == false + - nm_replace_local_user.after.2.reuse_limitation == 0 + - nm_replace_local_user.after.2.security_domains | length == 1 + - nm_replace_local_user.after.2.security_domains.0.name == "all" + - nm_replace_local_user.after.2.security_domains.0.roles | length == 1 + - nm_replace_local_user.after.2.security_domains.0.roles.0 == "super_admin" + - 
nm_replace_local_user.after.2.time_interval_limitation == 0 + - nm_replace_local_user.before | length == 3 + - nm_replace_local_user.before.2.first_name == "admin" + - nm_replace_local_user.before.2.remote_user_authorization == false + - nm_replace_local_user.before.2.reuse_limitation == 0 + - nm_replace_local_user.before.2.security_domains | length == 1 + - nm_replace_local_user.before.2.security_domains.0.name == "all" + - nm_replace_local_user.before.2.security_domains.0.roles | length == 1 + - nm_replace_local_user.before.2.security_domains.0.roles.0 == "super_admin" + - nm_replace_local_user.before.2.time_interval_limitation == 0 + - nm_replace_local_user.before.1.email == "ansibleuser@example.com" + - nm_replace_local_user.before.1.first_name == "Ansible first name" + - nm_replace_local_user.before.1.last_name == "Ansible last name" + - nm_replace_local_user.before.1.login_id == "ansible_local_user" + - nm_replace_local_user.before.1.remote_id_claim == "ansible_remote_user" + - nm_replace_local_user.before.1.remote_user_authorization == true + - nm_replace_local_user.before.1.reuse_limitation == 20 + - nm_replace_local_user.before.1.security_domains | length == 1 + - nm_replace_local_user.before.1.security_domains.0.name == "all" + - nm_replace_local_user.before.1.security_domains.0.roles | length == 2 + - nm_replace_local_user.before.1.security_domains.0.roles.0 == "observer" + - nm_replace_local_user.before.1.security_domains.0.roles.1 == "support_engineer" + - nm_replace_local_user.before.1.time_interval_limitation == 10 + - nm_replace_local_user.before.0.login_id == "ansible_local_user_2" + - nm_replace_local_user.before.0.security_domains | length == 1 + - nm_replace_local_user.before.0.security_domains.0.name == "all" + - nm_replace_local_user.diff == [] + - nm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com"" + - nm_replace_local_user.proposed.0.first_name == "Updated Ansible first name" + - 
nm_replace_local_user.proposed.0.last_name == "Updated Ansible last name" + - nm_replace_local_user.proposed.0.login_id == "ansible_local_user" + - nm_replace_local_user.proposed.0.remote_id_claim == "" + - nm_replace_local_user.proposed.0.remote_user_authorization == false + - nm_replace_local_user.proposed.0.reuse_limitation == 25 + - nm_replace_local_user.proposed.0.security_domains | length == 1 + - nm_replace_local_user.proposed.0.security_domains.0.name == "all" + - nm_replace_local_user.proposed.0.security_domains.0.roles | length == 1 + - nm_replace_local_user.proposed.0.security_domains.0.roles.0 == "super_admin" + - nm_replace_local_user.proposed.0.time_interval_limitation == 15 # DELETE @@ -123,6 +399,9 @@ <<: *delete_local_user register: nm_delete_local_user_again +- name: Asserts for local users deletion tasks + ansible.builtin.assert: + that: # CLEAN UP - name: Ensure local users do not exist @@ -131,4 +410,5 @@ config: - login_id: ansible_local_user - login_id: ansible_local_user_2 + - login_id: ansible_local_user_3 state: deleted From 401deedce51f5284617b534b9d3ffc8031989d8e Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 12 Mar 2026 11:45:29 -0400 Subject: [PATCH 053/109] [ignore] Finish integration test file for nd_local_user module. Remove Generic Class inheritence from NDConfigCollection. Clean Pydantic imports. 
--- plugins/module_utils/models/base.py | 2 +- plugins/module_utils/nd_config_collection.py | 33 +-- plugins/module_utils/nd_output.py | 2 +- plugins/module_utils/nd_state_machine.py | 9 +- plugins/module_utils/utils.py | 2 +- plugins/modules/nd_local_user.py | 2 +- .../targets/nd_local_user/tasks/main.yml | 267 +++++++++++++++++- 7 files changed, 275 insertions(+), 42 deletions(-) diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 79f9ec80..21fb983e 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -9,7 +9,7 @@ __metaclass__ = type from abc import ABC -from pydantic import BaseModel, ConfigDict +from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import BaseModel, ConfigDict from typing import List, Dict, Any, ClassVar, Set, Tuple, Union, Literal, Optional from ansible_collections.cisco.nd.plugins.module_utils.utils import issubset diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 364b8a8f..d34ca462 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -11,33 +11,30 @@ from typing import TypeVar, Generic, Optional, List, Dict, Any, Literal from copy import deepcopy from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.utils import issubset from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey -# Type aliases -ModelType = TypeVar("ModelType", bound=NDBaseModel) -class NDConfigCollection(Generic[ModelType]): +class NDConfigCollection: """ Nexus Dashboard configuration collection for NDBaseModel instances. """ - def __init__(self, model_class: ModelType, items: Optional[List[ModelType]] = None): + def __init__(self, model_class: NDBaseModel, items: Optional[List[NDBaseModel]] = None): """ Initialize collection. 
""" - self._model_class: ModelType = model_class + self._model_class: NDBaseModel = model_class # Dual storage - self._items: List[ModelType] = [] + self._items: List[NDBaseModel] = [] self._index: Dict[IdentifierKey, int] = {} if items: for item in items: self.add(item) - def _extract_key(self, item: ModelType) -> IdentifierKey: + def _extract_key(self, item: NDBaseModel) -> IdentifierKey: """ Extract identifier key from item. """ @@ -56,7 +53,7 @@ def _rebuild_index(self) -> None: # Core Operations - def add(self, item: ModelType) -> IdentifierKey: + def add(self, item: NDBaseModel) -> IdentifierKey: """ Add item to collection (O(1) operation). """ @@ -74,14 +71,14 @@ def add(self, item: ModelType) -> IdentifierKey: return key - def get(self, key: IdentifierKey) -> Optional[ModelType]: + def get(self, key: IdentifierKey) -> Optional[NDBaseModel]: """ Get item by identifier key (O(1) operation). """ index = self._index.get(key) return self._items[index] if index is not None else None - def replace(self, item: ModelType) -> bool: + def replace(self, item: NDBaseModel) -> bool: """ Replace existing item with same identifier (O(1) operation). """ @@ -97,7 +94,7 @@ def replace(self, item: ModelType) -> bool: self._items[index] = item return True - def merge(self, item: ModelType) -> ModelType: + def merge(self, item: NDBaseModel) -> NDBaseModel: """ Merge item with existing, or add if not present. """ @@ -129,7 +126,7 @@ def delete(self, key: IdentifierKey) -> bool: # Diff Operations # NOTE: Maybe add a similar one in the NDBaseModel (-> but is it necessary?) - def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "changed"]: + def get_diff_config(self, new_item: NDBaseModel) -> Literal["new", "no_diff", "changed"]: """ Compare single item against collection. 
""" @@ -147,7 +144,7 @@ def get_diff_config(self, new_item: ModelType) -> Literal["new", "no_diff", "cha return "no_diff" if is_subset else "changed" - def get_diff_collection(self, other: "NDConfigCollection[ModelType]") -> bool: + def get_diff_collection(self, other: "NDConfigCollection") -> bool: """ Check if two collections differ. """ @@ -167,7 +164,7 @@ def get_diff_collection(self, other: "NDConfigCollection[ModelType]") -> bool: return False - def get_diff_identifiers(self, other: "NDConfigCollection[ModelType]") -> List[IdentifierKey]: + def get_diff_identifiers(self, other: "NDConfigCollection") -> List[IdentifierKey]: """ Get identifiers in self but not in other. """ @@ -189,7 +186,7 @@ def keys(self) -> List[IdentifierKey]: """Get all identifier keys.""" return list(self._index.keys()) - def copy(self) -> "NDConfigCollection[ModelType]": + def copy(self) -> "NDConfigCollection": """Create deep copy of collection.""" return NDConfigCollection(model_class=self._model_class, items=deepcopy(self._items)) @@ -208,7 +205,7 @@ def to_payload_list(self, **kwargs) -> List[Dict[str, Any]]: return [item.to_payload(**kwargs) for item in self._items] @classmethod - def from_ansible_config(cls, data: List[Dict], model_class: type[ModelType], **kwargs) -> "NDConfigCollection[ModelType]": + def from_ansible_config(cls, data: List[Dict], model_class: type[NDBaseModel], **kwargs) -> "NDConfigCollection": """ Create collection from Ansible config. """ @@ -216,7 +213,7 @@ def from_ansible_config(cls, data: List[Dict], model_class: type[ModelType], **k return cls(model_class=model_class, items=items) @classmethod - def from_api_response(cls, response_data: List[Dict[str, Any]], model_class: type[ModelType], **kwargs) -> "NDConfigCollection[ModelType]": + def from_api_response(cls, response_data: List[Dict[str, Any]], model_class: type[NDBaseModel], **kwargs) -> "NDConfigCollection": """ Create collection from API response. 
""" diff --git a/plugins/module_utils/nd_output.py b/plugins/module_utils/nd_output.py index dbfc2cd2..0e5ed6ef 100644 --- a/plugins/module_utils/nd_output.py +++ b/plugins/module_utils/nd_output.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index bd86da3c..3b6c891c 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -41,17 +41,16 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest self.state = self.module.params["state"] # Initialize collections - self.nd_config_collection = NDConfigCollection[self.model_class] try: response_data = self.model_orchestrator.query_all() # State of configuration objects in ND before change execution - self.before = self.nd_config_collection.from_api_response(response_data=response_data, model_class=self.model_class) + self.before = NDConfigCollection.from_api_response(response_data=response_data, model_class=self.model_class) # State of current configuration objects in ND during change execution self.existing = self.before.copy() # Ongoing collection of configuration objects that were changed - self.sent = self.nd_config_collection(model_class=self.model_class) + self.sent = NDConfigCollection(model_class=self.model_class) # Collection of configuration objects given by user - self.proposed = self.nd_config_collection(model_class=self.model_class) + self.proposed = NDConfigCollection(model_class=self.model_class) for config in 
self.module.params.get("config", []): try: # Parse config into model diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index e09bd499..76e936bb 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index d1d871fe..56e59ad5 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/tests/integration/targets/nd_local_user/tasks/main.yml b/tests/integration/targets/nd_local_user/tasks/main.yml index de8ad5ed..b7f205ae 100644 --- a/tests/integration/targets/nd_local_user/tasks/main.yml +++ b/tests/integration/targets/nd_local_user/tasks/main.yml @@ -1,5 +1,5 @@ # Test code for the ND modules -# Copyright: (c) 2025, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -19,6 +19,7 @@ config: - login_id: ansible_local_user - login_id: ansible_local_user_2 + - login_id: ansible_local_user_3 state: deleted # CREATE @@ -217,19 +218,10 @@ cisco.nd.nd_local_user: <<: *nd_info config: - - login_id: admin - first_name: admin - remote_user_authorization: false - reuse_limitation: 0 - time_interval_limitation: 0 - security_domains: - - name: all - roles: - - super_admin - email: overrideansibleuser@example.com login_id: ansible_local_user first_name: Overridden Ansible first name - last_name: Overriden Ansible last name + last_name: 
Overridden Ansible last name user_password: overideansibleLocalUserPassword1% reuse_limitation: 15 time_interval_limitation: 5 @@ -239,6 +231,15 @@ - observer remote_id_claim: ansible_remote_user remote_user_authorization: true + - login_id: admin + first_name: admin + remote_user_authorization: false + reuse_limitation: 0 + time_interval_limitation: 0 + security_domains: + - name: all + roles: + - super_admin - login_id: ansible_local_user_3 user_password: ansibleLocalUser3Password1%Test security_domains: @@ -254,7 +255,7 @@ - cm_replace_local_user.after.0.login_id == "ansible_local_user_2" - cm_replace_local_user.after.0.security_domains | length == 1 - cm_replace_local_user.after.0.security_domains.0.name == "all" - - cm_replace_local_user.after.1.email == "updatedansibleuser@example.com"" + - cm_replace_local_user.after.1.email == "updatedansibleuser@example.com" - cm_replace_local_user.after.1.first_name == "Updated Ansible first name" - cm_replace_local_user.after.1.last_name == "Updated Ansible last name" - cm_replace_local_user.after.1.login_id == "ansible_local_user" @@ -301,7 +302,7 @@ - cm_replace_local_user.before.0.security_domains | length == 1 - cm_replace_local_user.before.0.security_domains.0.name == "all" - cm_replace_local_user.diff == [] - - cm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com"" + - cm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com" - cm_replace_local_user.proposed.0.first_name == "Updated Ansible first name" - cm_replace_local_user.proposed.0.last_name == "Updated Ansible last name" - cm_replace_local_user.proposed.0.login_id == "ansible_local_user" @@ -318,7 +319,7 @@ - nm_replace_local_user.after.0.login_id == "ansible_local_user_2" - nm_replace_local_user.after.0.security_domains | length == 1 - nm_replace_local_user.after.0.security_domains.0.name == "all" - - nm_replace_local_user.after.1.email == "updatedansibleuser@example.com"" + - nm_replace_local_user.after.1.email == 
"updatedansibleuser@example.com" - nm_replace_local_user.after.1.first_name == "Updated Ansible first name" - nm_replace_local_user.after.1.last_name == "Updated Ansible last name" - nm_replace_local_user.after.1.login_id == "ansible_local_user" @@ -365,7 +366,7 @@ - nm_replace_local_user.before.0.security_domains | length == 1 - nm_replace_local_user.before.0.security_domains.0.name == "all" - nm_replace_local_user.diff == [] - - nm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com"" + - nm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com" - nm_replace_local_user.proposed.0.first_name == "Updated Ansible first name" - nm_replace_local_user.proposed.0.last_name == "Updated Ansible last name" - nm_replace_local_user.proposed.0.login_id == "ansible_local_user" @@ -377,6 +378,161 @@ - nm_replace_local_user.proposed.0.security_domains.0.roles | length == 1 - nm_replace_local_user.proposed.0.security_domains.0.roles.0 == "super_admin" - nm_replace_local_user.proposed.0.time_interval_limitation == 15 + - nm_merge_local_user_2 is changed + - nm_merge_local_user_2.after | length == 3 + - nm_merge_local_user_2.after.0.email == "secondansibleuser@example.com" + - nm_merge_local_user_2.after.0.first_name == "Second Ansible first name" + - nm_merge_local_user_2.after.0.last_name == "Second Ansible last name" + - nm_merge_local_user_2.after.0.login_id == "ansible_local_user_2" + - nm_merge_local_user_2.after.0.remote_id_claim == "ansible_remote_user_2" + - nm_merge_local_user_2.after.0.remote_user_authorization == true + - nm_merge_local_user_2.after.0.reuse_limitation == 20 + - nm_merge_local_user_2.after.0.security_domains | length == 1 + - nm_merge_local_user_2.after.0.security_domains.0.name == "all" + - nm_merge_local_user_2.after.0.security_domains.0.roles | length == 1 + - nm_merge_local_user_2.after.0.security_domains.0.roles.0 == "fabric_admin" + - nm_merge_local_user_2.after.0.time_interval_limitation == 10 + - 
nm_merge_local_user_2.after.1.email == "updatedansibleuser@example.com" + - nm_merge_local_user_2.after.1.first_name == "Updated Ansible first name" + - nm_merge_local_user_2.after.1.last_name == "Updated Ansible last name" + - nm_merge_local_user_2.after.1.login_id == "ansible_local_user" + - nm_merge_local_user_2.after.1.remote_user_authorization == false + - nm_merge_local_user_2.after.1.reuse_limitation == 25 + - nm_merge_local_user_2.after.1.security_domains | length == 1 + - nm_merge_local_user_2.after.1.security_domains.0.name == "all" + - nm_merge_local_user_2.after.1.security_domains.0.roles | length == 1 + - nm_merge_local_user_2.after.1.security_domains.0.roles.0 == "super_admin" + - nm_merge_local_user_2.after.1.time_interval_limitation == 15 + - nm_merge_local_user_2.after.2.login_id == "admin" + - nm_merge_local_user_2.after.2.first_name == "admin" + - nm_merge_local_user_2.after.2.remote_user_authorization == false + - nm_merge_local_user_2.after.2.reuse_limitation == 0 + - nm_merge_local_user_2.after.2.security_domains | length == 1 + - nm_merge_local_user_2.after.2.security_domains.0.name == "all" + - nm_merge_local_user_2.after.2.security_domains.0.roles | length == 1 + - nm_merge_local_user_2.after.2.security_domains.0.roles.0 == "super_admin" + - nm_merge_local_user_2.after.2.time_interval_limitation == 0 + - nm_merge_local_user_2.before | length == 3 + - nm_merge_local_user_2.before.2.first_name == "admin" + - nm_merge_local_user_2.before.2.remote_user_authorization == false + - nm_merge_local_user_2.before.2.reuse_limitation == 0 + - nm_merge_local_user_2.before.2.security_domains | length == 1 + - nm_merge_local_user_2.before.2.security_domains.0.name == "all" + - nm_merge_local_user_2.before.2.security_domains.0.roles | length == 1 + - nm_merge_local_user_2.before.2.security_domains.0.roles.0 == "super_admin" + - nm_merge_local_user_2.before.2.time_interval_limitation == 0 + - nm_merge_local_user_2.before.1.email == 
"updatedansibleuser@example.com" + - nm_merge_local_user_2.before.1.first_name == "Updated Ansible first name" + - nm_merge_local_user_2.before.1.last_name == "Updated Ansible last name" + - nm_merge_local_user_2.before.1.login_id == "ansible_local_user" + - nm_merge_local_user_2.before.1.remote_user_authorization == false + - nm_merge_local_user_2.before.1.reuse_limitation == 25 + - nm_merge_local_user_2.before.1.security_domains | length == 1 + - nm_merge_local_user_2.before.1.security_domains.0.name == "all" + - nm_merge_local_user_2.before.1.security_domains.0.roles | length == 1 + - nm_merge_local_user_2.before.1.security_domains.0.roles.0 == "super_admin" + - nm_merge_local_user_2.before.1.time_interval_limitation == 15 + - nm_merge_local_user_2.before.0.login_id == "ansible_local_user_2" + - nm_merge_local_user_2.before.0.security_domains | length == 1 + - nm_merge_local_user_2.before.0.security_domains.0.name == "all" + - nm_merge_local_user_2.diff == [] + - nm_merge_local_user_2.proposed.0.email == "secondansibleuser@example.com" + - nm_merge_local_user_2.proposed.0.first_name == "Second Ansible first name" + - nm_merge_local_user_2.proposed.0.last_name == "Second Ansible last name" + - nm_merge_local_user_2.proposed.0.login_id == "ansible_local_user_2" + - nm_merge_local_user_2.proposed.0.remote_id_claim == "ansible_remote_user_2" + - nm_merge_local_user_2.proposed.0.remote_user_authorization == true + - nm_merge_local_user_2.proposed.0.reuse_limitation == 20 + - nm_merge_local_user_2.proposed.0.security_domains | length == 1 + - nm_merge_local_user_2.proposed.0.security_domains.0.name == "all" + - nm_merge_local_user_2.proposed.0.security_domains.0.roles | length == 1 + - nm_merge_local_user_2.proposed.0.security_domains.0.roles.0 == "fabric_admin" + - nm_merge_local_user_2.proposed.0.time_interval_limitation == 10 + - nm_merge_local_user_2_again is not changed + - nm_merge_local_user_2_again.after == nm_merge_local_user_2.after + - 
nm_merge_local_user_2_again.diff == [] + - nm_merge_local_user_2_again.proposed == nm_merge_local_user_2.proposed + - nm_override_local_users is changed + - nm_override_local_users.after | length == 3 + - nm_override_local_users.after.0.email == "overrideansibleuser@example.com" + - nm_override_local_users.after.0.first_name == "Overridden Ansible first name" + - nm_override_local_users.after.0.last_name == "Overridden Ansible last name" + - nm_override_local_users.after.0.login_id == "ansible_local_user" + - nm_override_local_users.after.0.remote_id_claim == "ansible_remote_user" + - nm_override_local_users.after.0.remote_user_authorization == true + - nm_override_local_users.after.0.reuse_limitation == 15 + - nm_override_local_users.after.0.security_domains | length == 1 + - nm_override_local_users.after.0.security_domains.0.name == "all" + - nm_override_local_users.after.0.security_domains.0.roles | length == 1 + - nm_override_local_users.after.0.security_domains.0.roles.0 == "observer" + - nm_override_local_users.after.0.time_interval_limitation == 5 + - nm_override_local_users.after.1.login_id == "admin" + - nm_override_local_users.after.1.first_name == "admin" + - nm_override_local_users.after.1.remote_user_authorization == false + - nm_override_local_users.after.1.reuse_limitation == 0 + - nm_override_local_users.after.1.security_domains | length == 1 + - nm_override_local_users.after.1.security_domains.0.name == "all" + - nm_override_local_users.after.1.security_domains.0.roles | length == 1 + - nm_override_local_users.after.1.security_domains.0.roles.0 == "super_admin" + - nm_override_local_users.after.1.time_interval_limitation == 0 + - nm_override_local_users.after.2.login_id == "ansible_local_user_3" + - nm_override_local_users.after.2.security_domains.0.name == "all" + - nm_override_local_users.before | length == 3 + - nm_override_local_users.before.2.first_name == "admin" + - nm_override_local_users.before.2.remote_user_authorization == false + - 
nm_override_local_users.before.2.reuse_limitation == 0 + - nm_override_local_users.before.2.security_domains | length == 1 + - nm_override_local_users.before.2.security_domains.0.name == "all" + - nm_override_local_users.before.2.security_domains.0.roles | length == 1 + - nm_override_local_users.before.2.security_domains.0.roles.0 == "super_admin" + - nm_override_local_users.before.2.time_interval_limitation == 0 + - nm_override_local_users.before.1.email == "updatedansibleuser@example.com" + - nm_override_local_users.before.1.first_name == "Updated Ansible first name" + - nm_override_local_users.before.1.last_name == "Updated Ansible last name" + - nm_override_local_users.before.1.login_id == "ansible_local_user" + - nm_override_local_users.before.1.remote_user_authorization == false + - nm_override_local_users.before.1.reuse_limitation == 25 + - nm_override_local_users.before.1.security_domains | length == 1 + - nm_override_local_users.before.1.security_domains.0.name == "all" + - nm_override_local_users.before.1.security_domains.0.roles | length == 1 + - nm_override_local_users.before.1.security_domains.0.roles.0 == "super_admin" + - nm_override_local_users.before.1.time_interval_limitation == 15 + - nm_override_local_users.before.0.email == "secondansibleuser@example.com" + - nm_override_local_users.before.0.first_name == "Second Ansible first name" + - nm_override_local_users.before.0.last_name == "Second Ansible last name" + - nm_override_local_users.before.0.login_id == "ansible_local_user_2" + - nm_override_local_users.before.0.remote_id_claim == "ansible_remote_user_2" + - nm_override_local_users.before.0.remote_user_authorization == true + - nm_override_local_users.before.0.reuse_limitation == 20 + - nm_override_local_users.before.0.security_domains | length == 1 + - nm_override_local_users.before.0.security_domains.0.name == "all" + - nm_override_local_users.before.0.security_domains.0.roles | length == 1 + - 
nm_override_local_users.before.0.security_domains.0.roles.0 == "fabric_admin" + - nm_override_local_users.before.0.time_interval_limitation == 10 + - nm_override_local_users.diff == [] + - nm_override_local_users.proposed.0.email == "overrideansibleuser@example.com" + - nm_override_local_users.proposed.0.first_name == "Overridden Ansible first name" + - nm_override_local_users.proposed.0.last_name == "Overridden Ansible last name" + - nm_override_local_users.proposed.0.login_id == "ansible_local_user" + - nm_override_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - nm_override_local_users.proposed.0.remote_user_authorization == true + - nm_override_local_users.proposed.0.reuse_limitation == 15 + - nm_override_local_users.proposed.0.security_domains | length == 1 + - nm_override_local_users.proposed.0.security_domains.0.name == "all" + - nm_override_local_users.proposed.0.security_domains.0.roles | length == 1 + - nm_override_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - nm_override_local_users.proposed.0.time_interval_limitation == 5 + - nm_override_local_users.proposed.1.login_id == "admin" + - nm_override_local_users.proposed.1.first_name == "admin" + - nm_override_local_users.proposed.1.remote_user_authorization == false + - nm_override_local_users.proposed.1.reuse_limitation == 0 + - nm_override_local_users.proposed.1.security_domains | length == 1 + - nm_override_local_users.proposed.1.security_domains.0.name == "all" + - nm_override_local_users.proposed.1.security_domains.0.roles | length == 1 + - nm_override_local_users.proposed.1.security_domains.0.roles.0 == "super_admin" + - nm_override_local_users.proposed.1.time_interval_limitation == 0 + - nm_override_local_users.proposed.2.login_id == "ansible_local_user_3" + - nm_override_local_users.proposed.2.security_domains.0.name == "all" # DELETE @@ -402,6 +558,87 @@ - name: Asserts for local users deletion tasks ansible.builtin.assert: that: + - cm_delete_local_user is 
changed + - cm_delete_local_user.after | length == 2 + - cm_delete_local_user.after.0.login_id == "ansible_local_user_3" + - cm_delete_local_user.after.0.security_domains.0.name == "all" + - cm_delete_local_user.after.1.login_id == "admin" + - cm_delete_local_user.after.1.first_name == "admin" + - cm_delete_local_user.after.1.remote_user_authorization == false + - cm_delete_local_user.after.1.reuse_limitation == 0 + - cm_delete_local_user.after.1.security_domains | length == 1 + - cm_delete_local_user.after.1.security_domains.0.name == "all" + - cm_delete_local_user.after.1.security_domains.0.roles | length == 1 + - cm_delete_local_user.after.1.security_domains.0.roles.0 == "super_admin" + - cm_delete_local_user.after.1.time_interval_limitation == 0 + - cm_delete_local_user.before | length == 3 + - cm_delete_local_user.before.0.email == "overrideansibleuser@example.com" + - cm_delete_local_user.before.0.first_name == "Overridden Ansible first name" + - cm_delete_local_user.before.0.last_name == "Overridden Ansible last name" + - cm_delete_local_user.before.0.login_id == "ansible_local_user" + - cm_delete_local_user.before.0.remote_id_claim == "ansible_remote_user" + - cm_delete_local_user.before.0.remote_user_authorization == true + - cm_delete_local_user.before.0.reuse_limitation == 15 + - cm_delete_local_user.before.0.security_domains | length == 1 + - cm_delete_local_user.before.0.security_domains.0.name == "all" + - cm_delete_local_user.before.0.security_domains.0.roles | length == 1 + - cm_delete_local_user.before.0.security_domains.0.roles.0 == "observer" + - cm_delete_local_user.before.0.time_interval_limitation == 5 + - cm_delete_local_user.before.1.login_id == "ansible_local_user_3" + - cm_delete_local_user.before.1.security_domains.0.name == "all" + - cm_delete_local_user.before.2.first_name == "admin" + - cm_delete_local_user.before.2.remote_user_authorization == false + - cm_delete_local_user.before.2.reuse_limitation == 0 + - 
cm_delete_local_user.before.2.security_domains | length == 1 + - cm_delete_local_user.before.2.security_domains.0.name == "all" + - cm_delete_local_user.before.2.security_domains.0.roles | length == 1 + - cm_delete_local_user.before.2.security_domains.0.roles.0 == "super_admin" + - cm_delete_local_user.before.2.time_interval_limitation == 0 + - cm_delete_local_user.diff == [] + - cm_delete_local_user.proposed.0.login_id == "ansible_local_user" + - nm_delete_local_user is changed + - nm_delete_local_user.after | length == 2 + - nm_delete_local_user.after.0.login_id == "ansible_local_user_3" + - nm_delete_local_user.after.0.security_domains.0.name == "all" + - nm_delete_local_user.after.1.login_id == "admin" + - nm_delete_local_user.after.1.first_name == "admin" + - nm_delete_local_user.after.1.remote_user_authorization == false + - nm_delete_local_user.after.1.reuse_limitation == 0 + - nm_delete_local_user.after.1.security_domains | length == 1 + - nm_delete_local_user.after.1.security_domains.0.name == "all" + - nm_delete_local_user.after.1.security_domains.0.roles | length == 1 + - nm_delete_local_user.after.1.security_domains.0.roles.0 == "super_admin" + - nm_delete_local_user.after.1.time_interval_limitation == 0 + - nm_delete_local_user.before | length == 3 + - nm_delete_local_user.before.0.email == "overrideansibleuser@example.com" + - nm_delete_local_user.before.0.first_name == "Overridden Ansible first name" + - nm_delete_local_user.before.0.last_name == "Overridden Ansible last name" + - nm_delete_local_user.before.0.login_id == "ansible_local_user" + - nm_delete_local_user.before.0.remote_id_claim == "ansible_remote_user" + - nm_delete_local_user.before.0.remote_user_authorization == true + - nm_delete_local_user.before.0.reuse_limitation == 15 + - nm_delete_local_user.before.0.security_domains | length == 1 + - nm_delete_local_user.before.0.security_domains.0.name == "all" + - nm_delete_local_user.before.0.security_domains.0.roles | length == 1 + - 
nm_delete_local_user.before.0.security_domains.0.roles.0 == "observer" + - nm_delete_local_user.before.0.time_interval_limitation == 5 + - nm_delete_local_user.before.1.login_id == "ansible_local_user_3" + - nm_delete_local_user.before.1.security_domains.0.name == "all" + - nm_delete_local_user.before.2.first_name == "admin" + - nm_delete_local_user.before.2.remote_user_authorization == false + - nm_delete_local_user.before.2.reuse_limitation == 0 + - nm_delete_local_user.before.2.security_domains | length == 1 + - nm_delete_local_user.before.2.security_domains.0.name == "all" + - nm_delete_local_user.before.2.security_domains.0.roles | length == 1 + - nm_delete_local_user.before.2.security_domains.0.roles.0 == "super_admin" + - nm_delete_local_user.before.2.time_interval_limitation == 0 + - nm_delete_local_user.diff == [] + - nm_delete_local_user.proposed.0.login_id == "ansible_local_user" + - nm_delete_local_user_again is not changed + - nm_delete_local_user_again.after == nm_delete_local_user.after + - nm_delete_local_user_again.before == nm_delete_local_user.after + - nm_delete_local_user_again.diff == [] + - nm_delete_local_user_again.proposed == nm_delete_local_user.proposed # CLEAN UP - name: Ensure local users do not exist From a325955615dbcf6e11a6eb9e2e3ee63d0ebfa4c5 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 12 Mar 2026 12:33:23 -0400 Subject: [PATCH 054/109] [ignore] Fix sanity issues by enhancing pydantic_compat.py. Fix Black formatting. 
--- plugins/module_utils/models/local_user.py | 1 - plugins/module_utils/nd_config_collection.py | 3 +-- plugins/module_utils/pydantic_compat.py | 14 ++++++++++++++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 0320d3c1..38f2b5d2 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -23,7 +23,6 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel from ansible_collections.cisco.nd.plugins.module_utils.constants import NDConstantMapping - USER_ROLES_MAPPING = NDConstantMapping( { "fabric_admin": "fabric-admin", diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index d34ca462..fa574ca2 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -8,13 +8,12 @@ __metaclass__ = type -from typing import TypeVar, Generic, Optional, List, Dict, Any, Literal +from typing import Optional, List, Dict, Any, Literal from copy import deepcopy from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey - class NDConfigCollection: """ Nexus Dashboard configuration collection for NDBaseModel instances. 
diff --git a/plugins/module_utils/pydantic_compat.py b/plugins/module_utils/pydantic_compat.py index 4456018a..2596d852 100644 --- a/plugins/module_utils/pydantic_compat.py +++ b/plugins/module_utils/pydantic_compat.py @@ -192,6 +192,20 @@ def decorator(func): return decorator + # Fallback: FieldSerializationInfo placeholder class that does nothing + class FieldSerializationInfo: + """Pydantic FieldSerializationInfo fallback when pydantic is not available.""" + + def __init__(self, **kwargs): + pass + + # Fallback: SerializationInfo placeholder class that does nothing + class SerializationInfo: + """Pydantic SerializationInfo fallback when pydantic is not available.""" + + def __init__(self, **kwargs): + pass + else: HAS_PYDANTIC = True # pylint: disable=invalid-name PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name From ea067693ae4078b77859ba9447ee24b547dd08b3 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 12 Mar 2026 12:48:36 -0400 Subject: [PATCH 055/109] [ignore] Remove all TODO comments. 
--- plugins/module_utils/endpoints/base.py | 4 +--- plugins/module_utils/endpoints/v1/infra_aaa_local_users.py | 1 - plugins/module_utils/nd_config_collection.py | 1 - plugins/module_utils/nd_state_machine.py | 2 -- plugins/module_utils/orchestrators/base.py | 2 -- plugins/module_utils/utils.py | 1 - plugins/modules/nd_local_user.py | 3 --- 7 files changed, 1 insertion(+), 13 deletions(-) diff --git a/plugins/module_utils/endpoints/base.py b/plugins/module_utils/endpoints/base.py index 2d214878..e5eb8c72 100644 --- a/plugins/module_utils/endpoints/base.py +++ b/plugins/module_utils/endpoints/base.py @@ -131,9 +131,7 @@ def verb(self) -> HttpVerbEnum: None """ - - # TODO: Maybe to be modifed to be more Pydantic (low priority) - # TODO: Maybe change function's name (low priority) + # NOTE: function to set endpoints attribute fields from identifiers -> acts as the bridge between Models and Endpoints for API Request Orchestration @abstractmethod def set_identifiers(self, identifier: IdentifierKey = None): diff --git a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py index d1013e24..9235afb6 100644 --- a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py +++ b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py @@ -31,7 +31,6 @@ class _V1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseEndpoint): /api/v1/infra/aaa/localUsers endpoint. 
""" - # TODO: Remove it base_path: Final = NDBasePath.nd_infra_aaa("localUsers") @property diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index fa574ca2..abcfc0f7 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -42,7 +42,6 @@ def _extract_key(self, item: NDBaseModel) -> IdentifierKey: except Exception as e: raise ValueError(f"Failed to extract identifier: {e}") from e - # TODO: optimize it -> only needed for delete method (low priority) def _rebuild_index(self) -> None: """Rebuild index from scratch (O(n) operation).""" self._index.clear() diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 3b6c891c..3840b360 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -27,7 +27,6 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest """ Initialize the Network Resource Module. 
""" - # TODO: Revisit Module initialization and configuration with rest_send self.module = module self.nd_module = NDModule(self.module) @@ -37,7 +36,6 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest # Configuration self.model_orchestrator = model_orchestrator(sender=self.nd_module) self.model_class = self.model_orchestrator.model_class - # TODO: Revisit these class variables when udpating Module intialization and configuration (low priority) self.state = self.module.params["state"] # Initialize collections diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 1a8b4f10..ddcb7569 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -34,11 +34,9 @@ class NDBaseOrchestrator(BaseModel): query_all_endpoint: Type[NDBaseEndpoint] # NOTE: Module Field is always required - # TODO: Replace it with future sender (low priority) sender: NDModule # NOTE: Generic CRUD API operations for simple endpoints with single identifier (e.g. 
"api/v1/infra/aaa/LocalUsers/{loginID}") - # TODO: Explore new ways to make them even more general -> e.g., create a general API operation function (low priority) def create(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: try: api_endpoint = self.create_endpoint() diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 76e936bb..2e62c6eb 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -56,7 +56,6 @@ def issubset(subset: Any, superset: Any) -> bool: return True -# TODO: Might not necessary with Pydantic validation and serialization built-in methods (see models/local_user) def remove_unwanted_keys(data: Dict, unwanted_keys: List[Union[str, List[str]]]) -> Dict: """Remove unwanted keys from dict (supports nested paths).""" data = deepcopy(data) diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 56e59ad5..f5efea03 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -199,9 +199,6 @@ def main(): ) # Manage state - # TODO: return module output class object: - # output = nd_state_machine.manage_state() - # module.exit_json(**output) nd_state_machine.manage_state() module.exit_json(**nd_state_machine.output.format()) From 47af5f4be6f886dcb855c7ef6028423f1f4ecb4f Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 12 Mar 2026 13:47:23 -0400 Subject: [PATCH 056/109] [ignore] Update endpoints to match latest nd42_integration branch. Update orchestrators accordingly. 
--- .../endpoints/v1/infra/aaa_local_users.py | 209 +++++++++ .../endpoints/v1/infra_aaa_local_users.py | 178 ------- plugins/module_utils/orchestrators/base.py | 14 +- .../module_utils/orchestrators/local_user.py | 24 +- ..._endpoints_api_v1_infra_aaa_local_users.py | 437 ++++++++++++++++++ 5 files changed, 665 insertions(+), 197 deletions(-) create mode 100644 plugins/module_utils/endpoints/v1/infra/aaa_local_users.py delete mode 100644 plugins/module_utils/endpoints/v1/infra_aaa_local_users.py create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_aaa_local_users.py diff --git a/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py new file mode 100644 index 00000000..925c5548 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py @@ -0,0 +1,209 @@ +# Copyright: (c) 2026, Gaspard Micol (@gmicol) +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Infra AAA Local Users endpoint models. + +This module contains endpoint definitions for AAA Local Users operations in the ND Infra API. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import Field +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import LoginIdMixin +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.base_path import BasePath + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey + + +class _EpInfraAaaLocalUsersBase(LoginIdMixin, NDEndpointBaseModel): + """ + Base class for ND Infra AAA Local Users endpoints. + + Provides common functionality for all HTTP methods on the /api/v1/infra/aaa/localUsers endpoint. + """ + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path. + + ## Returns + + - Complete endpoint path string, optionally including login_id + """ + if self.login_id is not None: + return BasePath.path("aaa", "localUsers", self.login_id) + return BasePath.path("aaa", "localUsers") + + def set_identifiers(self, identifier: IdentifierKey = None): + self.login_id = identifier + + +class EpInfraAaaLocalUsersGet(_EpInfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users GET Endpoint + + ## Description + + Endpoint to retrieve local users from the ND Infra AAA service. + Optionally retrieve a specific local user by login_id. 
+ + ## Path + + - /api/v1/infra/aaa/localUsers + - /api/v1/infra/aaa/localUsers/{login_id} + + ## Verb + + - GET + + ## Usage + + ```python + # Get all local users + request = EpApiV1InfraAaaLocalUsersGet() + path = request.path + verb = request.verb + + # Get specific local user + request = EpApiV1InfraAaaLocalUsersGet() + request.login_id = "admin" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpInfraAaaLocalUsersGet"] = Field(default="EpInfraAaaLocalUsersGet", frozen=True, description="Class name for backward compatibility") + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET + + +class EpInfraAaaLocalUsersPost(_EpInfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users POST Endpoint + + ## Description + + Endpoint to create a local user in the ND Infra AAA service. + + ## Path + + - /api/v1/infra/aaa/localUsers + + ## Verb + + - POST + + ## Usage + + ```python + request = EpApiV1InfraAaaLocalUsersPost() + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpInfraAaaLocalUsersPost"] = Field( + default="EpInfraAaaLocalUsersPost", frozen=True, description="Class name for backward compatibility" + ) + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpInfraAaaLocalUsersPut(_EpInfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users PUT Endpoint + + ## Description + + Endpoint to update a local user in the ND Infra AAA service. 
+ + ## Path + + - /api/v1/infra/aaa/localUsers/{login_id} + + ## Verb + + - PUT + + ## Usage + + ```python + request = EpApiV1InfraAaaLocalUsersPut() + request.login_id = "admin" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpInfraAaaLocalUsersPut"] = Field(default="EpInfraAaaLocalUsersPut", frozen=True, description="Class name for backward compatibility") + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.PUT + + +class EpInfraAaaLocalUsersDelete(_EpInfraAaaLocalUsersBase): + """ + # Summary + + ND Infra AAA Local Users DELETE Endpoint + + ## Description + + Endpoint to delete a local user from the ND Infra AAA service. + + ## Path + + - /api/v1/infra/aaa/localUsers/{login_id} + + ## Verb + + - DELETE + + ## Usage + + ```python + request = EpApiV1InfraAaaLocalUsersDelete() + request.login_id = "admin" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpInfraAaaLocalUsersDelete"] = Field( + default="EpInfraAaaLocalUsersDelete", frozen=True, description="Class name for backward compatibility" + ) + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.DELETE diff --git a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py deleted file mode 100644 index 9235afb6..00000000 --- a/plugins/module_utils/endpoints/v1/infra_aaa_local_users.py +++ /dev/null @@ -1,178 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Allen Robel (@allenrobel) -# Copyright: (c) 2026, Gaspard Micol (@gmicol) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -""" -ND Infra AAA LocalUsers endpoint models. - -This module contains endpoint definitions for LocalUsers-related operations -in the ND Infra AAA API. 
-""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -from typing import Literal, Final -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import LoginIdMixin -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.enums import VerbEnum -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseEndpoint, NDBasePath -from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import Field -from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey - - -class _V1InfraAaaLocalUsersBase(LoginIdMixin, NDBaseEndpoint): - """ - Base class for ND Infra AAA Local Users endpoints. - - Provides common functionality for all HTTP methods on the - /api/v1/infra/aaa/localUsers endpoint. - """ - - base_path: Final = NDBasePath.nd_infra_aaa("localUsers") - - @property - def path(self) -> str: - """ - # Summary - - Build the endpoint path. - - ## Returns - - - Complete endpoint path string, optionally including login_id - """ - if self.login_id is not None: - return NDBasePath.nd_infra_aaa("localUsers", self.login_id) - return self.base_path - - def set_identifiers(self, identifier: IdentifierKey = None): - self.login_id = identifier - - -class V1InfraAaaLocalUsersGet(_V1InfraAaaLocalUsersBase): - """ - # Summary - - ND Infra AAA Local Users GET Endpoint - - ## Description - - Endpoint to retrieve local users from the ND Infra AAA service. - Optionally retrieve a specific local user by login_id. 
- - ## Path - - - /api/v1/infra/aaa/localUsers - - /api/v1/infra/aaa/localUsers/{login_id} - - ## Verb - - - GET - """ - - class_name: Literal["V1InfraAaaLocalUsersGet"] = Field( - default="V1InfraAaaLocalUsersGet", - description="Class name for backward compatibility", - frozen=True, - ) - - @property - def verb(self) -> VerbEnum: - """Return the HTTP verb for this endpoint.""" - return VerbEnum.GET - - -class V1InfraAaaLocalUsersPost(_V1InfraAaaLocalUsersBase): - """ - # Summary - - ND Infra AAA Local Users POST Endpoint - - ## Description - - Endpoint to create a local user in the ND Infra AAA service. - - ## Path - - - /api/v1/infra/aaa/localUsers - - ## Verb - - - POST - """ - - class_name: Literal["V1InfraAaaLocalUsersPost"] = Field( - default="V1InfraAaaLocalUsersPost", - description="Class name for backward compatibility", - frozen=True, - ) - - @property - def verb(self) -> VerbEnum: - """Return the HTTP verb for this endpoint.""" - return VerbEnum.POST - - -class V1InfraAaaLocalUsersPut(_V1InfraAaaLocalUsersBase): - """ - # Summary - - ND Infra AAA Local Users PUT Endpoint - - ## Description - - Endpoint to update a local user in the ND Infra AAA service. - - ## Path - - - /api/v1/infra/aaa/localUsers/{login_id} - - ## Verb - - - PUT - """ - - class_name: Literal["V1InfraAaaLocalUsersPut"] = Field( - default="V1InfraAaaLocalUsersPut", - description="Class name for backward compatibility", - frozen=True, - ) - - @property - def verb(self) -> VerbEnum: - """Return the HTTP verb for this endpoint.""" - return VerbEnum.PUT - - -class V1InfraAaaLocalUsersDelete(_V1InfraAaaLocalUsersBase): - """ - # Summary - - ND Infra AAA Local Users DELETE Endpoint - - ## Description - - Endpoint to delete a local user from the ND Infra AAA service. 
- - ## Path - - - /api/v1/infra/aaa/localUsers/{login_id} - - ## Verb - - - DELETE - """ - - class_name: Literal["V1InfraAaaLocalUsersDelete"] = Field( - default="V1InfraAaaLocalUsersDelete", - description="Class name for backward compatibility", - frozen=True, - ) - - @property - def verb(self) -> VerbEnum: - """Return the HTTP verb for this endpoint.""" - return VerbEnum.DELETE diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index ddcb7569..651a9d30 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -12,7 +12,7 @@ from typing import ClassVar, Type, Optional from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType @@ -27,11 +27,11 @@ class NDBaseOrchestrator(BaseModel): model_class: ClassVar[Type[NDBaseModel]] = Type[NDBaseModel] # NOTE: if not defined by subclasses, return an error as they are required - create_endpoint: Type[NDBaseEndpoint] - update_endpoint: Type[NDBaseEndpoint] - delete_endpoint: Type[NDBaseEndpoint] - query_one_endpoint: Type[NDBaseEndpoint] - query_all_endpoint: Type[NDBaseEndpoint] + create_endpoint: Type[NDEndpointBaseModel] + update_endpoint: Type[NDEndpointBaseModel] + delete_endpoint: Type[NDEndpointBaseModel] + query_one_endpoint: Type[NDEndpointBaseModel] + query_all_endpoint: Type[NDEndpointBaseModel] # NOTE: Module Field is always required sender: NDModule @@ -70,7 +70,7 @@ def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: def query_all(self, model_instance: Optional[NDBaseModel] = None, **kwargs) -> ResponseType: 
try: - result = self.sender.query_obj(self.query_all_endpoint.path) + result = self.sender.query_obj(self.query_all_endpoint().path) return result or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index 5e52a00b..db7bbfdc 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -12,31 +12,31 @@ from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDBaseEndpoint +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra_aaa_local_users import ( - V1InfraAaaLocalUsersPost, - V1InfraAaaLocalUsersPut, - V1InfraAaaLocalUsersDelete, - V1InfraAaaLocalUsersGet, +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.aaa_local_users import ( + EpInfraAaaLocalUsersPost, + EpInfraAaaLocalUsersPut, + EpInfraAaaLocalUsersDelete, + EpInfraAaaLocalUsersGet, ) class LocalUserOrchestrator(NDBaseOrchestrator): model_class: Type[NDBaseModel] = LocalUserModel - create_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersPost - update_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersPut - delete_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersDelete - query_one_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersGet - query_all_endpoint: Type[NDBaseEndpoint] = V1InfraAaaLocalUsersGet + create_endpoint: Type[NDEndpointBaseModel] = 
EpInfraAaaLocalUsersPost + update_endpoint: Type[NDEndpointBaseModel] = EpInfraAaaLocalUsersPut + delete_endpoint: Type[NDEndpointBaseModel] = EpInfraAaaLocalUsersDelete + query_one_endpoint: Type[NDEndpointBaseModel] = EpInfraAaaLocalUsersGet + query_all_endpoint: Type[NDEndpointBaseModel] = EpInfraAaaLocalUsersGet def query_all(self) -> ResponseType: """ Custom query_all action to extract 'localusers' from response. """ try: - result = self.sender.query_obj(self.query_all_endpoint.base_path) + result = self.sender.query_obj(self.query_all_endpoint().path) return result.get("localusers", []) or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_aaa_local_users.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_aaa_local_users.py new file mode 100644 index 00000000..71cfd9b6 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_infra_aaa_local_users.py @@ -0,0 +1,437 @@ +# Copyright: (c) 2026, Allen Robel (@arobel) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for infra_aaa_local_users.py + +Tests the ND Infra AAA endpoint classes +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.aaa_local_users import ( + EpInfraAaaLocalUsersDelete, + EpInfraAaaLocalUsersGet, + EpInfraAaaLocalUsersPost, + EpInfraAaaLocalUsersPut, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpInfraAaaLocalUsersGet +# 
============================================================================= + + +def test_endpoints_api_v1_infra_aaa_00010(): + """ + # Summary + + Verify EpInfraAaaLocalUsersGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet.__init__() + - EpInfraAaaLocalUsersGet.verb + - EpInfraAaaLocalUsersGet.class_name + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersGet() + assert instance.class_name == "EpInfraAaaLocalUsersGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_infra_aaa_00020(): + """ + # Summary + + Verify EpInfraAaaLocalUsersGet path without login_id + + ## Test + + - path returns "/api/v1/infra/aaa/localUsers" when login_id is None + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet.path + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersGet() + result = instance.path + assert result == "/api/v1/infra/aaa/localUsers" + + +def test_endpoints_api_v1_infra_aaa_00030(): + """ + # Summary + + Verify EpInfraAaaLocalUsersGet path with login_id + + ## Test + + - path returns "/api/v1/infra/aaa/localUsers/admin" when login_id is set + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet.path + - EpInfraAaaLocalUsersGet.login_id + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersGet() + instance.login_id = "admin" + result = instance.path + assert result == "/api/v1/infra/aaa/localUsers/admin" + + +def test_endpoints_api_v1_infra_aaa_00040(): + """ + # Summary + + Verify EpInfraAaaLocalUsersGet login_id can be set at instantiation + + ## Test + + - login_id can be provided during instantiation + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet.__init__() + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersGet(login_id="testuser") + assert instance.login_id == "testuser" + assert instance.path == "/api/v1/infra/aaa/localUsers/testuser" + + +# 
============================================================================= +# Test: EpInfraAaaLocalUsersPost +# ============================================================================= + + +def test_endpoints_api_v1_infra_aaa_00100(): + """ + # Summary + + Verify EpInfraAaaLocalUsersPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpInfraAaaLocalUsersPost.__init__() + - EpInfraAaaLocalUsersPost.verb + - EpInfraAaaLocalUsersPost.class_name + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersPost() + assert instance.class_name == "EpInfraAaaLocalUsersPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_infra_aaa_00110(): + """ + # Summary + + Verify EpInfraAaaLocalUsersPost path + + ## Test + + - path returns "/api/v1/infra/aaa/localUsers" for POST + + ## Classes and Methods + + - EpInfraAaaLocalUsersPost.path + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersPost() + result = instance.path + assert result == "/api/v1/infra/aaa/localUsers" + + +def test_endpoints_api_v1_infra_aaa_00120(): + """ + # Summary + + Verify EpInfraAaaLocalUsersPost path with login_id + + ## Test + + - path returns "/api/v1/infra/aaa/localUsers/admin" when login_id is set + + ## Classes and Methods + + - EpInfraAaaLocalUsersPost.path + - EpInfraAaaLocalUsersPost.login_id + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersPost() + instance.login_id = "admin" + result = instance.path + assert result == "/api/v1/infra/aaa/localUsers/admin" + + +# ============================================================================= +# Test: EpInfraAaaLocalUsersPut +# ============================================================================= + + +def test_endpoints_api_v1_infra_aaa_00200(): + """ + # Summary + + Verify EpInfraAaaLocalUsersPut basic instantiation + + ## Test + + - Instance can be created + - class_name is set 
correctly + - verb is PUT + + ## Classes and Methods + + - EpInfraAaaLocalUsersPut.__init__() + - EpInfraAaaLocalUsersPut.verb + - EpInfraAaaLocalUsersPut.class_name + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersPut() + assert instance.class_name == "EpInfraAaaLocalUsersPut" + assert instance.verb == HttpVerbEnum.PUT + + +def test_endpoints_api_v1_infra_aaa_00210(): + """ + # Summary + + Verify EpInfraAaaLocalUsersPut path with login_id + + ## Test + + - path returns "/api/v1/infra/aaa/localUsers/admin" when login_id is set + + ## Classes and Methods + + - EpInfraAaaLocalUsersPut.path + - EpInfraAaaLocalUsersPut.login_id + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersPut() + instance.login_id = "admin" + result = instance.path + assert result == "/api/v1/infra/aaa/localUsers/admin" + + +def test_endpoints_api_v1_infra_aaa_00220(): + """ + # Summary + + Verify EpInfraAaaLocalUsersPut with complex login_id + + ## Test + + - login_id with special characters is handled correctly + + ## Classes and Methods + + - EpInfraAaaLocalUsersPut.path + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersPut(login_id="user-name_123") + assert instance.path == "/api/v1/infra/aaa/localUsers/user-name_123" + + +# ============================================================================= +# Test: EpInfraAaaLocalUsersDelete +# ============================================================================= + + +def test_endpoints_api_v1_infra_aaa_00300(): + """ + # Summary + + Verify EpInfraAaaLocalUsersDelete basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is DELETE + + ## Classes and Methods + + - EpInfraAaaLocalUsersDelete.__init__() + - EpInfraAaaLocalUsersDelete.verb + - EpInfraAaaLocalUsersDelete.class_name + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersDelete() + assert instance.class_name == "EpInfraAaaLocalUsersDelete" + assert instance.verb == 
HttpVerbEnum.DELETE + + +def test_endpoints_api_v1_infra_aaa_00310(): + """ + # Summary + + Verify EpInfraAaaLocalUsersDelete path with login_id + + ## Test + + - path returns "/api/v1/infra/aaa/localUsers/admin" when login_id is set + + ## Classes and Methods + + - EpInfraAaaLocalUsersDelete.path + - EpInfraAaaLocalUsersDelete.login_id + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersDelete() + instance.login_id = "admin" + result = instance.path + assert result == "/api/v1/infra/aaa/localUsers/admin" + + +def test_endpoints_api_v1_infra_aaa_00320(): + """ + # Summary + + Verify EpInfraAaaLocalUsersDelete without login_id + + ## Test + + - path returns base path when login_id is None + + ## Classes and Methods + + - EpInfraAaaLocalUsersDelete.path + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersDelete() + result = instance.path + assert result == "/api/v1/infra/aaa/localUsers" + + +# ============================================================================= +# Test: All HTTP methods on same endpoint +# ============================================================================= + + +def test_endpoints_api_v1_infra_aaa_00400(): + """ + # Summary + + Verify all HTTP methods work correctly on same resource + + ## Test + + - GET, POST, PUT, DELETE all return correct paths for same login_id + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet + - EpInfraAaaLocalUsersPost + - EpInfraAaaLocalUsersPut + - EpInfraAaaLocalUsersDelete + """ + login_id = "testuser" + + with does_not_raise(): + get_ep = EpInfraAaaLocalUsersGet(login_id=login_id) + post_ep = EpInfraAaaLocalUsersPost(login_id=login_id) + put_ep = EpInfraAaaLocalUsersPut(login_id=login_id) + delete_ep = EpInfraAaaLocalUsersDelete(login_id=login_id) + + # All should have same path when login_id is set + expected_path = "/api/v1/infra/aaa/localUsers/testuser" + assert get_ep.path == expected_path + assert post_ep.path == expected_path + assert put_ep.path == expected_path + 
assert delete_ep.path == expected_path + + # But different verbs + assert get_ep.verb == HttpVerbEnum.GET + assert post_ep.verb == HttpVerbEnum.POST + assert put_ep.verb == HttpVerbEnum.PUT + assert delete_ep.verb == HttpVerbEnum.DELETE + + +# ============================================================================= +# Test: Pydantic validation +# ============================================================================= + + +def test_endpoints_api_v1_infra_aaa_00500(): + """ + # Summary + + Verify Pydantic validation for login_id + + ## Test + + - Empty string is rejected for login_id (min_length=1) + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet.__init__() + """ + with pytest.raises(ValueError): + EpInfraAaaLocalUsersGet(login_id="") + + +def test_endpoints_api_v1_infra_aaa_00510(): + """ + # Summary + + Verify login_id can be None + + ## Test + + - login_id accepts None as valid value + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet.__init__() + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersGet(login_id=None) + assert instance.login_id is None + + +def test_endpoints_api_v1_infra_aaa_00520(): + """ + # Summary + + Verify login_id can be modified after instantiation + + ## Test + + - login_id can be changed after object creation + + ## Classes and Methods + + - EpInfraAaaLocalUsersGet.login_id + """ + with does_not_raise(): + instance = EpInfraAaaLocalUsersGet() + assert instance.login_id is None + instance.login_id = "newuser" + assert instance.login_id == "newuser" + assert instance.path == "/api/v1/infra/aaa/localUsers/newuser" From 2f6de8a2e60ec86c4e042941edecba467e6c78ba Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 12 Mar 2026 14:33:06 -0400 Subject: [PATCH 057/109] [ignore] Update pydantic_compat.py to support extra Pydantic methods and classes. 
--- .../module_utils/common/pydantic_compat.py | 57 ++++- plugins/module_utils/endpoints/base.py | 1 - .../endpoints/v1/infra/aaa_local_users.py | 12 +- plugins/module_utils/models/base.py | 2 +- plugins/module_utils/models/local_user.py | 2 +- plugins/module_utils/nd_state_machine.py | 2 +- plugins/module_utils/orchestrators/base.py | 5 +- .../module_utils/orchestrators/local_user.py | 3 +- plugins/module_utils/pydantic_compat.py | 238 ------------------ .../module_utils/endpoints/test_base_model.py | 5 - 10 files changed, 61 insertions(+), 266 deletions(-) delete mode 100644 plugins/module_utils/pydantic_compat.py diff --git a/plugins/module_utils/common/pydantic_compat.py b/plugins/module_utils/common/pydantic_compat.py index e1550a18..b26559d2 100644 --- a/plugins/module_utils/common/pydantic_compat.py +++ b/plugins/module_utils/common/pydantic_compat.py @@ -1,6 +1,5 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) +# Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -34,10 +33,6 @@ # fmt: on # isort: on -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - import traceback from typing import TYPE_CHECKING, Any, Callable, Union @@ -51,11 +46,16 @@ Field, PydanticExperimentalWarning, StrictBool, + SecretStr, ValidationError, field_serializer, + model_serializer, field_validator, model_validator, validator, + computed_field, + FieldSerializationInfo, + SerializationInfo, ) HAS_PYDANTIC = True # pylint: disable=invalid-name @@ -71,11 +71,16 @@ Field, PydanticExperimentalWarning, StrictBool, + SecretStr, ValidationError, field_serializer, + model_serializer, field_validator, model_validator, validator, + computed_field, + FieldSerializationInfo, + SerializationInfo, ) except ImportError: HAS_PYDANTIC = False # pylint: disable=invalid-name @@ -127,6 +132,15 @@ def decorator(func): return decorator + # Fallback: 
model_serializer decorator that does nothing + def model_serializer(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic model_serializer fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + # Fallback: field_validator decorator that does nothing def field_validator(*args, **kwargs) -> Callable[..., Any]: # pylint: disable=unused-argument,invalid-name """Pydantic field_validator fallback when pydantic is not available.""" @@ -136,6 +150,15 @@ def decorator(func): return decorator + # Fallback: computed_field decorator that does nothing + def computed_field(*args, **kwargs): # pylint: disable=unused-argument + """Pydantic computed_field fallback when pydantic is not available.""" + + def decorator(func): + return func + + return decorator + # Fallback: AfterValidator that returns the function unchanged def AfterValidator(func): # pylint: disable=invalid-name """Pydantic AfterValidator fallback when pydantic is not available.""" @@ -152,6 +175,9 @@ def BeforeValidator(func): # pylint: disable=invalid-name # Fallback: StrictBool StrictBool = bool + # Fallback: SecretStr + SecretStr = str + # Fallback: ValidationError class ValidationError(Exception): """ @@ -183,6 +209,20 @@ def decorator(func): return decorator + # Fallback: FieldSerializationInfo placeholder class that does nothing + class FieldSerializationInfo: + """Pydantic FieldSerializationInfo fallback when pydantic is not available.""" + + def __init__(self, **kwargs): + pass + + # Fallback: SerializationInfo placeholder class that does nothing + class SerializationInfo: + """Pydantic SerializationInfo fallback when pydantic is not available.""" + + def __init__(self, **kwargs): + pass + else: HAS_PYDANTIC = True # pylint: disable=invalid-name PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name @@ -234,10 +274,15 @@ def main(): "PYDANTIC_IMPORT_ERROR", "PydanticExperimentalWarning", "StrictBool", + "SecretStr", "ValidationError", 
"field_serializer", + "model_serializer", "field_validator", "model_validator", "require_pydantic", "validator", + "computed_field", + "FieldSerializationInfo", + "SerializationInfo", ] diff --git a/plugins/module_utils/endpoints/base.py b/plugins/module_utils/endpoints/base.py index e5eb8c72..c3d7f4e1 100644 --- a/plugins/module_utils/endpoints/base.py +++ b/plugins/module_utils/endpoints/base.py @@ -133,6 +133,5 @@ def verb(self) -> HttpVerbEnum: """ # NOTE: function to set endpoints attribute fields from identifiers -> acts as the bridge between Models and Endpoints for API Request Orchestration - @abstractmethod def set_identifiers(self, identifier: IdentifierKey = None): pass diff --git a/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py index 925c5548..26660622 100644 --- a/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py +++ b/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py @@ -10,15 +10,7 @@ from __future__ import absolute_import, annotations, division, print_function -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from typing import Literal - +from typing import Literal from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import Field from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import LoginIdMixin @@ -49,7 +41,7 @@ def path(self) -> str: if self.login_id is not None: return BasePath.path("aaa", "localUsers", self.login_id) return BasePath.path("aaa", "localUsers") - + def set_identifiers(self, identifier: IdentifierKey = None): self.login_id = identifier diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 21fb983e..07b6ee28 100644 --- a/plugins/module_utils/models/base.py +++ 
b/plugins/module_utils/models/base.py @@ -9,7 +9,7 @@ __metaclass__ = type from abc import ABC -from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import BaseModel, ConfigDict +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import BaseModel, ConfigDict from typing import List, Dict, Any, ClassVar, Set, Tuple, Union, Literal, Optional from ansible_collections.cisco.nd.plugins.module_utils.utils import issubset diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index 38f2b5d2..a47a4a0a 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -9,7 +9,7 @@ __metaclass__ = type from typing import List, Dict, Any, Optional, ClassVar, Literal -from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import ( +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, SecretStr, model_serializer, diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 3840b360..efed3517 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -9,7 +9,7 @@ __metaclass__ = type from typing import Type -from ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import ValidationError +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ValidationError from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 651a9d30..1f4e3e69 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -8,7 +8,7 @@ __metaclass__ = type -from 
ansible_collections.cisco.nd.plugins.module_utils.pydantic_compat import BaseModel, ConfigDict +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import BaseModel, ConfigDict from typing import ClassVar, Type, Optional from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule @@ -70,7 +70,8 @@ def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: def query_all(self, model_instance: Optional[NDBaseModel] = None, **kwargs) -> ResponseType: try: - result = self.sender.query_obj(self.query_all_endpoint().path) + api_endpoint = self.query_all_endpoint() + result = self.sender.query_obj(api_endpoint.path) return result or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index db7bbfdc..689ba9dc 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -36,7 +36,8 @@ def query_all(self) -> ResponseType: Custom query_all action to extract 'localusers' from response. """ try: - result = self.sender.query_obj(self.query_all_endpoint().path) + api_endpoint = self.query_all_endpoint() + result = self.sender.query_obj(api_endpoint.path) return result.get("localusers", []) or [] except Exception as e: raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/pydantic_compat.py b/plugins/module_utils/pydantic_compat.py deleted file mode 100644 index 2596d852..00000000 --- a/plugins/module_utils/pydantic_compat.py +++ /dev/null @@ -1,238 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Allen Robel (@arobel) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -# pylint: disable=too-few-public-methods -""" -Pydantic compatibility layer. 
- -This module provides a single location for Pydantic imports with fallback -implementations when Pydantic is not available. This ensures consistent -behavior across all modules and follows the DRY principle. -""" - -from __future__ import absolute_import, annotations, division, print_function - -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - -import traceback -from typing import TYPE_CHECKING, Any, Callable, Union - -if TYPE_CHECKING: - # Type checkers always see the real Pydantic types - from pydantic import ( - AfterValidator, - BaseModel, - BeforeValidator, - ConfigDict, - Field, - PydanticExperimentalWarning, - StrictBool, - SecretStr, - ValidationError, - field_serializer, - model_serializer, - field_validator, - model_validator, - validator, - computed_field, - FieldSerializationInfo, - SerializationInfo, - ) -else: - # Runtime: try to import, with fallback - try: - from pydantic import ( - AfterValidator, - BaseModel, - BeforeValidator, - ConfigDict, - Field, - PydanticExperimentalWarning, - StrictBool, - SecretStr, - ValidationError, - field_serializer, - model_serializer, - field_validator, - model_validator, - validator, - computed_field, - FieldSerializationInfo, - SerializationInfo, - ) - except ImportError: - HAS_PYDANTIC = False # pylint: disable=invalid-name - PYDANTIC_IMPORT_ERROR: Union[str, None] = traceback.format_exc() # pylint: disable=invalid-name - - # Fallback: Minimal BaseModel replacement - class BaseModel: - """Fallback BaseModel when pydantic is not available.""" - - model_config = {"validate_assignment": False, "use_enum_values": False} - - def __init__(self, **kwargs): - """Accept keyword arguments and set them as attributes.""" - for key, value in kwargs.items(): - setattr(self, key, value) - - def model_dump(self, exclude_none: bool = False, exclude_defaults: bool = False) -> dict: # pylint: disable=unused-argument - """Return a dictionary of field names and values. 
- - Args: - exclude_none: If True, exclude fields with None values - exclude_defaults: Accepted for API compatibility but not implemented in fallback - """ - result = {} - for key, value in self.__dict__.items(): - if exclude_none and value is None: - continue - result[key] = value - return result - - # Fallback: ConfigDict that does nothing - def ConfigDict(**kwargs) -> dict: # pylint: disable=unused-argument,invalid-name - """Pydantic ConfigDict fallback when pydantic is not available.""" - return kwargs - - # Fallback: Field that does nothing - def Field(**kwargs) -> Any: # pylint: disable=unused-argument,invalid-name - """Pydantic Field fallback when pydantic is not available.""" - if "default_factory" in kwargs: - return kwargs["default_factory"]() - return kwargs.get("default") - - # Fallback: field_serializer decorator that does nothing - def field_serializer(*args, **kwargs): # pylint: disable=unused-argument - """Pydantic field_serializer fallback when pydantic is not available.""" - - def decorator(func): - return func - - return decorator - - # Fallback: model_serializer decorator that does nothing - def model_serializer(*args, **kwargs): # pylint: disable=unused-argument - """Pydantic model_serializer fallback when pydantic is not available.""" - - def decorator(func): - return func - - return decorator - - # Fallback: field_validator decorator that does nothing - def field_validator(*args, **kwargs) -> Callable[..., Any]: # pylint: disable=unused-argument,invalid-name - """Pydantic field_validator fallback when pydantic is not available.""" - - def decorator(func): - return func - - return decorator - - # Fallback: computed_field decorator that does nothing - def computed_field(*args, **kwargs): # pylint: disable=unused-argument - """Pydantic computed_field fallback when pydantic is not available.""" - - def decorator(func): - return func - - return decorator - - # Fallback: AfterValidator that returns the function unchanged - def AfterValidator(func): 
# pylint: disable=invalid-name - """Pydantic AfterValidator fallback when pydantic is not available.""" - return func - - # Fallback: BeforeValidator that returns the function unchanged - def BeforeValidator(func): # pylint: disable=invalid-name - """Pydantic BeforeValidator fallback when pydantic is not available.""" - return func - - # Fallback: PydanticExperimentalWarning - PydanticExperimentalWarning = Warning - - # Fallback: StrictBool - StrictBool = bool - - # Fallback: SecretStr - SecretStr = str - - # Fallback: ValidationError - class ValidationError(Exception): - """ - Pydantic ValidationError fallback when pydantic is not available. - """ - - def __init__(self, message="A custom error occurred."): - self.message = message - super().__init__(self.message) - - def __str__(self): - return f"ValidationError: {self.message}" - - # Fallback: model_validator decorator that does nothing - def model_validator(*args, **kwargs): # pylint: disable=unused-argument - """Pydantic model_validator fallback when pydantic is not available.""" - - def decorator(func): - return func - - return decorator - - # Fallback: validator decorator that does nothing - def validator(*args, **kwargs): # pylint: disable=unused-argument - """Pydantic validator fallback when pydantic is not available.""" - - def decorator(func): - return func - - return decorator - - # Fallback: FieldSerializationInfo placeholder class that does nothing - class FieldSerializationInfo: - """Pydantic FieldSerializationInfo fallback when pydantic is not available.""" - - def __init__(self, **kwargs): - pass - - # Fallback: SerializationInfo placeholder class that does nothing - class SerializationInfo: - """Pydantic SerializationInfo fallback when pydantic is not available.""" - - def __init__(self, **kwargs): - pass - - else: - HAS_PYDANTIC = True # pylint: disable=invalid-name - PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name - -# Set HAS_PYDANTIC for when TYPE_CHECKING is True -if TYPE_CHECKING: 
- HAS_PYDANTIC = True # pylint: disable=invalid-name - PYDANTIC_IMPORT_ERROR = None # pylint: disable=invalid-name - -__all__ = [ - "AfterValidator", - "BaseModel", - "BeforeValidator", - "ConfigDict", - "Field", - "HAS_PYDANTIC", - "PYDANTIC_IMPORT_ERROR", - "PydanticExperimentalWarning", - "StrictBool", - "SecretStr", - "ValidationError", - "field_serializer", - "model_serializer", - "field_validator", - "model_validator", - "validator", - "computed_field", - "FieldSerializationInfo", - "SerializationInfo", -] diff --git a/tests/unit/module_utils/endpoints/test_base_model.py b/tests/unit/module_utils/endpoints/test_base_model.py index a14da9d8..ce9d1e8d 100644 --- a/tests/unit/module_utils/endpoints/test_base_model.py +++ b/tests/unit/module_utils/endpoints/test_base_model.py @@ -99,7 +99,6 @@ def test_base_model_00200(): with pytest.raises(TypeError, match=match): class _BadEndpoint(NDEndpointBaseModel): - @property def path(self) -> str: return "/api/v1/test/bad" @@ -132,7 +131,6 @@ def test_base_model_00300(): """ class _MiddleABC(NDEndpointBaseModel, ABC): - @property @abstractmethod def extra(self) -> str: @@ -182,7 +180,6 @@ def test_base_model_00310(): """ class _MiddleABC2(NDEndpointBaseModel, ABC): - @property @abstractmethod def extra(self) -> str: @@ -192,7 +189,6 @@ def extra(self) -> str: with pytest.raises(TypeError, match=match): class _BadConcreteFromMiddle(_MiddleABC2): - @property def path(self) -> str: return "/api/v1/test/bad-middle" @@ -229,7 +225,6 @@ def test_base_model_00400(): with pytest.raises(TypeError, match=r'Literal\["_ExampleEndpoint"\]') as exc_info: class _ExampleEndpoint(NDEndpointBaseModel): - @property def path(self) -> str: return "/api/v1/test/example" From 79dc00094f45f100affa659c91d83376547942e1 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 17 Mar 2026 12:01:56 -0400 Subject: [PATCH 058/109] [ignore] Remove Python 2.7 compatibilities. 
--- plugins/module_utils/common/exceptions.py | 4 ---- plugins/module_utils/common/log.py | 6 ------ plugins/module_utils/constants.py | 4 ---- plugins/module_utils/endpoints/enums.py | 4 ---- plugins/module_utils/endpoints/mixins.py | 1 - plugins/module_utils/endpoints/query_params.py | 2 -- plugins/module_utils/endpoints/v1/infra/base_path.py | 2 -- plugins/module_utils/endpoints/v1/infra/login.py | 2 -- plugins/module_utils/endpoints/v1/manage/base_path.py | 2 -- plugins/module_utils/enums.py | 5 ----- plugins/module_utils/models/base.py | 4 ---- plugins/module_utils/models/local_user.py | 4 ---- plugins/module_utils/models/nested.py | 4 ---- plugins/module_utils/nd.py | 4 ---- plugins/module_utils/nd_argument_specs.py | 4 ---- plugins/module_utils/nd_config_collection.py | 4 ---- plugins/module_utils/nd_output.py | 4 ---- plugins/module_utils/nd_state_machine.py | 4 ---- plugins/module_utils/nd_v2.py | 6 ------ plugins/module_utils/ndi.py | 3 --- plugins/module_utils/ndi_argument_specs.py | 4 ---- plugins/module_utils/orchestrators/base.py | 4 ---- plugins/module_utils/orchestrators/local_user.py | 4 ---- plugins/module_utils/orchestrators/types.py | 4 ---- plugins/module_utils/rest/protocols/response_handler.py | 3 +-- plugins/module_utils/rest/protocols/response_validation.py | 4 +--- plugins/module_utils/rest/protocols/sender.py | 4 ++-- plugins/module_utils/rest/response_handler_nd.py | 4 +--- .../rest/response_strategies/nd_v1_strategy.py | 4 +--- plugins/module_utils/rest/rest_send.py | 1 - plugins/module_utils/rest/results.py | 2 -- plugins/module_utils/rest/sender_nd.py | 4 +--- plugins/module_utils/types.py | 4 ---- plugins/module_utils/utils.py | 4 ---- plugins/modules/nd_local_user.py | 7 +------ 35 files changed, 8 insertions(+), 122 deletions(-) diff --git a/plugins/module_utils/common/exceptions.py b/plugins/module_utils/common/exceptions.py index 0d7b7bcc..0c53c2c2 100644 --- a/plugins/module_utils/common/exceptions.py +++ 
b/plugins/module_utils/common/exceptions.py @@ -15,10 +15,6 @@ # fmt: on # isort: on -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - from typing import Any, Optional from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( diff --git a/plugins/module_utils/common/log.py b/plugins/module_utils/common/log.py index 29182539..f43d9018 100644 --- a/plugins/module_utils/common/log.py +++ b/plugins/module_utils/common/log.py @@ -1,15 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - import json import logging from enum import Enum diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index 563041a0..adbe345e 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2022, Akini Ross (@akinross) # Copyright: (c) 2024, Gaspard Micol (@gmicol) @@ -7,8 +5,6 @@ from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import Dict from types import MappingProxyType from copy import deepcopy diff --git a/plugins/module_utils/endpoints/enums.py b/plugins/module_utils/endpoints/enums.py index 802b8fe8..92ae5783 100644 --- a/plugins/module_utils/endpoints/enums.py +++ b/plugins/module_utils/endpoints/enums.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@allenrobel) # Copyright: (c) 2026, Gaspard Micol (@gmicol) @@ -10,8 +8,6 @@ from __future__ import absolute_import, division, print_function -__metaclass__ = type - from enum import Enum diff --git a/plugins/module_utils/endpoints/mixins.py b/plugins/module_utils/endpoints/mixins.py index 
22d9a2dc..e7f0620c 100644 --- a/plugins/module_utils/endpoints/mixins.py +++ b/plugins/module_utils/endpoints/mixins.py @@ -11,7 +11,6 @@ from __future__ import absolute_import, annotations, division, print_function - from typing import Optional from ansible_collections.cisco.nd.plugins.module_utils.enums import BooleanStringEnum diff --git a/plugins/module_utils/endpoints/query_params.py b/plugins/module_utils/endpoints/query_params.py index 5bf8ff08..2cddd97d 100644 --- a/plugins/module_utils/endpoints/query_params.py +++ b/plugins/module_utils/endpoints/query_params.py @@ -11,12 +11,10 @@ from __future__ import absolute_import, annotations, division, print_function - from enum import Enum from typing import Optional, Protocol from urllib.parse import quote - from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( BaseModel, Field, diff --git a/plugins/module_utils/endpoints/v1/infra/base_path.py b/plugins/module_utils/endpoints/v1/infra/base_path.py index f0612025..0db15ae9 100644 --- a/plugins/module_utils/endpoints/v1/infra/base_path.py +++ b/plugins/module_utils/endpoints/v1/infra/base_path.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/endpoints/v1/infra/login.py b/plugins/module_utils/endpoints/v1/infra/login.py index 70968615..6fff9159 100644 --- a/plugins/module_utils/endpoints/v1/infra/login.py +++ b/plugins/module_utils/endpoints/v1/infra/login.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/endpoints/v1/manage/base_path.py b/plugins/module_utils/endpoints/v1/manage/base_path.py index 5f043ced..52bb4e56 100644 --- a/plugins/module_utils/endpoints/v1/manage/base_path.py +++ 
b/plugins/module_utils/endpoints/v1/manage/base_path.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/enums.py b/plugins/module_utils/enums.py index 55d1f1ac..83f1f76d 100644 --- a/plugins/module_utils/enums.py +++ b/plugins/module_utils/enums.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # pylint: disable=wrong-import-position # pylint: disable=missing-module-docstring # Copyright: (c) 2026, Allen Robel (@allenrobel) @@ -21,10 +20,6 @@ # fmt: on # isort: on -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - from enum import Enum diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index 07b6ee28..a62a12b1 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from abc import ABC from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import BaseModel, ConfigDict from typing import List, Dict, Any, ClassVar, Set, Tuple, Union, Literal, Optional diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user.py index a47a4a0a..6d4960f3 100644 --- a/plugins/module_utils/models/local_user.py +++ b/plugins/module_utils/models/local_user.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import List, Dict, Any, Optional, ClassVar, Literal from 
ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, diff --git a/plugins/module_utils/models/nested.py b/plugins/module_utils/models/nested.py index 0573e5f8..c3af1d71 100644 --- a/plugins/module_utils/models/nested.py +++ b/plugins/module_utils/models/nested.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import List, ClassVar from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel diff --git a/plugins/module_utils/nd.py b/plugins/module_utils/nd.py index 42b1b118..50a5eeb2 100644 --- a/plugins/module_utils/nd.py +++ b/plugins/module_utils/nd.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2021, Lionel Hercot (@lhercot) # Copyright: (c) 2022, Cindy Zhao (@cizhao) # Copyright: (c) 2022, Akini Ross (@akinross) @@ -9,8 +7,6 @@ from __future__ import absolute_import, division, print_function from functools import reduce -__metaclass__ = type - from copy import deepcopy import os import shutil diff --git a/plugins/module_utils/nd_argument_specs.py b/plugins/module_utils/nd_argument_specs.py index 7ef10d04..798ca90f 100644 --- a/plugins/module_utils/nd_argument_specs.py +++ b/plugins/module_utils/nd_argument_specs.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2023, Shreyas Srish (@shrsr) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - def ntp_server_spec(): return dict( diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index abcfc0f7..0da7247f 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py 
@@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import Optional, List, Dict, Any, Literal from copy import deepcopy from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel diff --git a/plugins/module_utils/nd_output.py b/plugins/module_utils/nd_output.py index 0e5ed6ef..8088b09b 100644 --- a/plugins/module_utils/nd_output.py +++ b/plugins/module_utils/nd_output.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import Dict, Any, Optional, List, Union from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index efed3517..e3ea328c 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import Type from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ValidationError from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/module_utils/nd_v2.py b/plugins/module_utils/nd_v2.py index 0a3fe61a..a622d77f 100644 --- a/plugins/module_utils/nd_v2.py +++ b/plugins/module_utils/nd_v2.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel 
(@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -47,10 +45,6 @@ def main(): # fmt: on # isort: on -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - import logging from typing import Any, Optional diff --git a/plugins/module_utils/ndi.py b/plugins/module_utils/ndi.py index 37e7ec56..6ff912aa 100644 --- a/plugins/module_utils/ndi.py +++ b/plugins/module_utils/ndi.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2021, Lionel Hercot (@lhercot) # Copyright: (c) 2022, Cindy Zhao (@cizhao) # Copyright: (c) 2022, Akini Ross (@akinross) @@ -16,7 +14,6 @@ HAS_JSONPATH_NG_PARSE = True except ImportError: HAS_JSONPATH_NG_PARSE = False -__metaclass__ = type from ansible_collections.cisco.nd.plugins.module_utils.constants import OBJECT_TYPES, MATCH_TYPES diff --git a/plugins/module_utils/ndi_argument_specs.py b/plugins/module_utils/ndi_argument_specs.py index 641e675c..a367e3c5 100644 --- a/plugins/module_utils/ndi_argument_specs.py +++ b/plugins/module_utils/ndi_argument_specs.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2022, Akini Ross (@akinross) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from ansible_collections.cisco.nd.plugins.module_utils.constants import MATCH_TYPES, OPERATORS, TCP_FLAGS diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index 1f4e3e69..fe16a524 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from 
ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import BaseModel, ConfigDict from typing import ClassVar, Type, Optional from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index 689ba9dc..332719bf 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import Type from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel diff --git a/plugins/module_utils/orchestrators/types.py b/plugins/module_utils/orchestrators/types.py index b721c65b..415526c7 100644 --- a/plugins/module_utils/orchestrators/types.py +++ b/plugins/module_utils/orchestrators/types.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import Any, Union, List, Dict ResponseType = Union[List[Dict[str, Any]], Dict[str, Any], None] diff --git a/plugins/module_utils/rest/protocols/response_handler.py b/plugins/module_utils/rest/protocols/response_handler.py index 487e12cf..ab658c99 100644 --- a/plugins/module_utils/rest/protocols/response_handler.py +++ b/plugins/module_utils/rest/protocols/response_handler.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # pylint: disable=missing-module-docstring # pylint: disable=unnecessary-ellipsis # 
pylint: disable=wrong-import-position @@ -13,7 +12,7 @@ # isort: on # pylint: disable=invalid-name -__metaclass__ = type + # pylint: enable=invalid-name """ diff --git a/plugins/module_utils/rest/protocols/response_validation.py b/plugins/module_utils/rest/protocols/response_validation.py index d1ec5ef0..30a81b97 100644 --- a/plugins/module_utils/rest/protocols/response_validation.py +++ b/plugins/module_utils/rest/protocols/response_validation.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -26,7 +24,7 @@ # isort: on # pylint: disable=invalid-name -__metaclass__ = type + # pylint: enable=invalid-name try: diff --git a/plugins/module_utils/rest/protocols/sender.py b/plugins/module_utils/rest/protocols/sender.py index 5e55047c..df9f4d1b 100644 --- a/plugins/module_utils/rest/protocols/sender.py +++ b/plugins/module_utils/rest/protocols/sender.py @@ -1,7 +1,7 @@ # pylint: disable=wrong-import-position # pylint: disable=missing-module-docstring # pylint: disable=unnecessary-ellipsis -# -*- coding: utf-8 -*- + # Copyright: (c) 2026, Allen Robel (@arobel) @@ -15,7 +15,7 @@ # isort: on # pylint: disable=invalid-name -__metaclass__ = type + # pylint: enable=invalid-name try: diff --git a/plugins/module_utils/rest/response_handler_nd.py b/plugins/module_utils/rest/response_handler_nd.py index e7026d30..f0f30b94 100644 --- a/plugins/module_utils/rest/response_handler_nd.py +++ b/plugins/module_utils/rest/response_handler_nd.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -62,7 +60,7 @@ class (e.g. 
`NdV2Strategy`) conforming to `ResponseValidationStrategy` and injec # isort: on # pylint: disable=invalid-name -__metaclass__ = type + # pylint: enable=invalid-name import copy diff --git a/plugins/module_utils/rest/response_strategies/nd_v1_strategy.py b/plugins/module_utils/rest/response_strategies/nd_v1_strategy.py index 58c7784f..a5953789 100644 --- a/plugins/module_utils/rest/response_strategies/nd_v1_strategy.py +++ b/plugins/module_utils/rest/response_strategies/nd_v1_strategy.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -25,7 +23,7 @@ # isort: on # pylint: disable=invalid-name -__metaclass__ = type + # pylint: enable=invalid-name from typing import Any, Optional diff --git a/plugins/module_utils/rest/rest_send.py b/plugins/module_utils/rest/rest_send.py index c87009a5..7631b0dd 100644 --- a/plugins/module_utils/rest/rest_send.py +++ b/plugins/module_utils/rest/rest_send.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # pylint: disable=wrong-import-position # pylint: disable=missing-module-docstring # Copyright: (c) 2026, Allen Robel (@arobel) diff --git a/plugins/module_utils/rest/results.py b/plugins/module_utils/rest/results.py index 59281683..faee00dc 100644 --- a/plugins/module_utils/rest/results.py +++ b/plugins/module_utils/rest/results.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/rest/sender_nd.py b/plugins/module_utils/rest/sender_nd.py index ae333dd0..b5ed9b85 100644 --- a/plugins/module_utils/rest/sender_nd.py +++ b/plugins/module_utils/rest/sender_nd.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Allen Robel (@arobel) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -17,7 
+15,7 @@ # isort: on # pylint: disable=invalid-name -__metaclass__ = type + # pylint: enable=invalid-name import copy diff --git a/plugins/module_utils/types.py b/plugins/module_utils/types.py index 3111a095..b0056d5a 100644 --- a/plugins/module_utils/types.py +++ b/plugins/module_utils/types.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from typing import Any, Union, Tuple IdentifierKey = Union[str, int, Tuple[Any, ...]] diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 2e62c6eb..7d05e4af 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -1,13 +1,9 @@ -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - from copy import deepcopy from typing import Any, Dict, List, Union diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index f5efea03..25d04fb5 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -1,14 +1,9 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - # Copyright: (c) 2026, Gaspard Micol (@gmicol) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": "community"} DOCUMENTATION = r""" @@ -112,7 +107,7 @@ - cisco.nd.modules - cisco.nd.check_mode notes: -- This module is only supported on Nexus Dashboard having version 4.1.0 or higher. 
+- This module is only supported on Nexus Dashboard having version 4.2.1 or higher. - This module is not idempotent when creating or updating a local user object when O(config.user_password) is used. """ From 2c7ec7837618a2aead4f3432e93c28c9839be12b Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 17 Mar 2026 12:50:52 -0400 Subject: [PATCH 059/109] [ignore] Fix comments and docstrings. made and static methods for class. --- .../endpoints/v1/infra/aaa_local_users.py | 12 ++++++------ plugins/module_utils/nd_config_collection.py | 13 ++++++------- plugins/module_utils/nd_output.py | 2 +- plugins/module_utils/nd_state_machine.py | 4 ++-- plugins/modules/nd_local_user.py | 2 +- .../targets/nd_local_user/tasks/main.yml | 4 ++-- 6 files changed, 18 insertions(+), 19 deletions(-) diff --git a/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py index 26660622..ea3b1f4b 100644 --- a/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py +++ b/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py @@ -32,7 +32,7 @@ def path(self) -> str: """ # Summary - Build the endpoint path. + Build the /api/v1/infra/aaa/localUsers endpoint path. 
## Returns @@ -70,12 +70,12 @@ class EpInfraAaaLocalUsersGet(_EpInfraAaaLocalUsersBase): ```python # Get all local users - request = EpApiV1InfraAaaLocalUsersGet() + request = EpInfraAaaLocalUsersGet() path = request.path verb = request.verb # Get specific local user - request = EpApiV1InfraAaaLocalUsersGet() + request = EpInfraAaaLocalUsersGet() request.login_id = "admin" path = request.path verb = request.verb @@ -111,7 +111,7 @@ class EpInfraAaaLocalUsersPost(_EpInfraAaaLocalUsersBase): ## Usage ```python - request = EpApiV1InfraAaaLocalUsersPost() + request = EpInfraAaaLocalUsersPost() path = request.path verb = request.verb ``` @@ -148,7 +148,7 @@ class EpInfraAaaLocalUsersPut(_EpInfraAaaLocalUsersBase): ## Usage ```python - request = EpApiV1InfraAaaLocalUsersPut() + request = EpInfraAaaLocalUsersPut() request.login_id = "admin" path = request.path verb = request.verb @@ -184,7 +184,7 @@ class EpInfraAaaLocalUsersDelete(_EpInfraAaaLocalUsersBase): ## Usage ```python - request = EpApiV1InfraAaaLocalUsersDelete() + request = EpInfraAaaLocalUsersDelete() request.login_id = "admin" path = request.path verb = request.verb diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 0da7247f..832cc132 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -119,7 +119,6 @@ def delete(self, key: IdentifierKey) -> bool: # Diff Operations - # NOTE: Maybe add a similar one in the NDBaseModel (-> but is it necessary?) def get_diff_config(self, new_item: NDBaseModel) -> Literal["new", "no_diff", "changed"]: """ Compare single item against collection. 
@@ -198,18 +197,18 @@ def to_payload_list(self, **kwargs) -> List[Dict[str, Any]]: """ return [item.to_payload(**kwargs) for item in self._items] - @classmethod - def from_ansible_config(cls, data: List[Dict], model_class: type[NDBaseModel], **kwargs) -> "NDConfigCollection": + @staticmethod + def from_ansible_config(data: List[Dict], model_class: type[NDBaseModel], **kwargs) -> "NDConfigCollection": """ Create collection from Ansible config. """ items = [model_class.from_config(item_data, **kwargs) for item_data in data] - return cls(model_class=model_class, items=items) + return NDConfigCollection(model_class=model_class, items=items) - @classmethod - def from_api_response(cls, response_data: List[Dict[str, Any]], model_class: type[NDBaseModel], **kwargs) -> "NDConfigCollection": + @staticmethod + def from_api_response(response_data: List[Dict[str, Any]], model_class: type[NDBaseModel], **kwargs) -> "NDConfigCollection": """ Create collection from API response. """ items = [model_class.from_response(item_data, **kwargs) for item_data in response_data] - return cls(model_class=model_class, items=items) + return NDConfigCollection(model_class=model_class, items=items) diff --git a/plugins/module_utils/nd_output.py b/plugins/module_utils/nd_output.py index 8088b09b..09759b96 100644 --- a/plugins/module_utils/nd_output.py +++ b/plugins/module_utils/nd_output.py @@ -34,7 +34,7 @@ def format(self, **kwargs) -> Dict[str, Any]: if self._output_level in ("debug", "info"): output["proposed"] = self._proposed.to_ansible_config() if isinstance(self._proposed, NDConfigCollection) else self._proposed if self._output_level == "debug": - output["logs"] = "Not yet implemented" + output["logs"] = self._logs if self._extra: output.update(self._extra) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index e3ea328c..d6af1c6f 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -16,12 
+16,12 @@ class NDStateMachine: """ - Generic Network Resource Module for Nexus Dashboard. + Generic State Machine for Nexus Dashboard. """ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchestrator]): """ - Initialize the Network Resource Module. + Initialize the ND State Machine. """ self.module = module self.nd_module = NDModule(self.module) diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 25d04fb5..f672cc91 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -9,7 +9,7 @@ DOCUMENTATION = r""" --- module: nd_local_user -version_added: "1.4.0" +version_added: "1.6.0" short_description: Manage local users on Cisco Nexus Dashboard description: - Manage local users on Cisco Nexus Dashboard (ND). diff --git a/tests/integration/targets/nd_local_user/tasks/main.yml b/tests/integration/targets/nd_local_user/tasks/main.yml index b7f205ae..c4540568 100644 --- a/tests/integration/targets/nd_local_user/tasks/main.yml +++ b/tests/integration/targets/nd_local_user/tasks/main.yml @@ -536,7 +536,7 @@ # DELETE -- name: Delete local user by name (check mode) +- name: Delete local user (check mode) cisco.nd.nd_local_user: &delete_local_user <<: *nd_info config: @@ -545,7 +545,7 @@ check_mode: true register: cm_delete_local_user -- name: Delete local user by name (normal mode) +- name: Delete local user (normal mode) cisco.nd.nd_local_user: <<: *delete_local_user register: nm_delete_local_user From 7142c4369f54c73d0ffa3ed0bd2172c5b7bb8517 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Wed, 18 Mar 2026 13:25:47 -0400 Subject: [PATCH 060/109] [ignore] Slightly modify Exceptions handling in NDStateMachine. Remove self.send from check_mode guards in NDStateMachine. Fix documentation for nd_local_user. 
--- plugins/module_utils/nd_state_machine.py | 25 ++++++++++++------------ plugins/modules/nd_local_user.py | 2 +- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index d6af1c6f..37324020 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -45,16 +45,18 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest self.sent = NDConfigCollection(model_class=self.model_class) # Collection of configuration objects given by user self.proposed = NDConfigCollection(model_class=self.model_class) + for config in self.module.params.get("config", []): - try: - # Parse config into model - item = self.model_class.from_config(config) - self.proposed.add(item) - except ValidationError as e: - raise NDStateMachineError(f"Invalid configuration. for config {config}: {str(e)}") + # Parse config into model + item = self.model_class.from_config(config) + self.proposed.add(item) + self.output.assign(after=self.existing, before=self.before, proposed=self.proposed) + + except ValidationError as e: + raise NDStateMachineError(f"Invalid configuration. 
for config {config}: {str(e)}") from e except Exception as e: - raise NDStateMachineError(f"Initialization failed: {str(e)}") + raise NDStateMachineError(f"Initialization failed: {str(e)}") from e # State Management (core function) def manage_state(self) -> None: @@ -105,11 +107,10 @@ def _manage_create_update_state(self) -> None: if diff_status == "changed": if not self.module.check_mode: self.model_orchestrator.update(final_item) - self.sent.add(final_item) elif diff_status == "new": if not self.module.check_mode: self.model_orchestrator.create(final_item) - self.sent.add(final_item) + self.sent.add(final_item) # Log operation self.output.assign(after=self.existing) @@ -117,7 +118,7 @@ def _manage_create_update_state(self) -> None: except Exception as e: error_msg = f"Failed to process {identifier}: {e}" if not self.module.params.get("ignore_errors", False): - raise NDStateMachineError(error_msg) + raise NDStateMachineError(error_msg) from e def _manage_override_deletions(self) -> None: """ @@ -144,7 +145,7 @@ def _manage_override_deletions(self) -> None: except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" if not self.module.params.get("ignore_errors", False): - raise NDStateMachineError(error_msg) + raise NDStateMachineError(error_msg) from e def _manage_delete_state(self) -> None: """Handle deleted state.""" @@ -169,4 +170,4 @@ def _manage_delete_state(self) -> None: except Exception as e: error_msg = f"Failed to delete {identifier}: {e}" if not self.module.params.get("ignore_errors", False): - raise NDStateMachineError(error_msg) + raise NDStateMachineError(error_msg) from e diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index f672cc91..d6c02d00 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -13,7 +13,7 @@ short_description: Manage local users on Cisco Nexus Dashboard description: - Manage local users on Cisco Nexus Dashboard (ND). 
-- It supports creating, updating, querying, and deleting local users. +- It supports creating, updating, and deleting local users. author: - Gaspard Micol (@gmicol) options: From bc5cfb088f604bd61e0c84faebb0544f48f6cebd Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Wed, 18 Mar 2026 13:31:33 -0400 Subject: [PATCH 061/109] [ignore] Rename aaa_local_users.py to infra_aaa_local_users.py. Move models/local_user.py to new dir models/local_user. --- .../v1/infra/{aaa_local_users.py => infra_aaa_local_users.py} | 0 plugins/module_utils/models/{ => local_user}/local_user.py | 0 plugins/module_utils/orchestrators/local_user.py | 4 ++-- plugins/modules/nd_local_user.py | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) rename plugins/module_utils/endpoints/v1/infra/{aaa_local_users.py => infra_aaa_local_users.py} (100%) rename plugins/module_utils/models/{ => local_user}/local_user.py (100%) diff --git a/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra/infra_aaa_local_users.py similarity index 100% rename from plugins/module_utils/endpoints/v1/infra/aaa_local_users.py rename to plugins/module_utils/endpoints/v1/infra/infra_aaa_local_users.py diff --git a/plugins/module_utils/models/local_user.py b/plugins/module_utils/models/local_user/local_user.py similarity index 100% rename from plugins/module_utils/models/local_user.py rename to plugins/module_utils/models/local_user/local_user.py diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index 332719bf..0c2a6bf8 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -7,10 +7,10 @@ from typing import Type from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from 
ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel +from ansible_collections.cisco.nd.plugins.module_utils.models.local_user.local_user import LocalUserModel from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.aaa_local_users import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.infra_aaa_local_users import ( EpInfraAaaLocalUsersPost, EpInfraAaaLocalUsersPut, EpInfraAaaLocalUsersDelete, diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index d6c02d00..53680e99 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -173,7 +173,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import NDStateMachine -from ansible_collections.cisco.nd.plugins.module_utils.models.local_user import LocalUserModel +from ansible_collections.cisco.nd.plugins.module_utils.models.local_user.local_user import LocalUserModel from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.local_user import LocalUserOrchestrator From ed33a5a60f7665f3b4743e84626ef5943a1d7704 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 19 Mar 2026 12:54:08 -0400 Subject: [PATCH 062/109] [ignore] Update integration tests for nd_local_user module. 
--- .../targets/nd_local_user/tasks/main.yml | 1136 ++++++++++++----- 1 file changed, 800 insertions(+), 336 deletions(-) diff --git a/tests/integration/targets/nd_local_user/tasks/main.yml b/tests/integration/targets/nd_local_user/tasks/main.yml index c4540568..c76e22c3 100644 --- a/tests/integration/targets/nd_local_user/tasks/main.yml +++ b/tests/integration/targets/nd_local_user/tasks/main.yml @@ -14,7 +14,7 @@ output_level: '{{ api_key_output_level | default("debug") }}' - name: Ensure local users do not exist before test starts - cisco.nd.nd_local_user: + cisco.nd.nd_local_user: &clean_all_local_users <<: *nd_info config: - login_id: ansible_local_user @@ -22,9 +22,12 @@ - login_id: ansible_local_user_3 state: deleted -# CREATE -- name: Create local users with full and minimum configuration (check mode) - cisco.nd.nd_local_user: &create_local_user + +# --- MERGED STATE TESTS --- + +# MERGED STATE TESTS: CREATE +- name: Create local users with full and minimum configuration (merged state - check mode) + cisco.nd.nd_local_user: &create_local_user_merged_state <<: *nd_info config: - email: ansibleuser@example.com @@ -47,151 +50,124 @@ - name: all state: merged check_mode: true - register: cm_create_local_users + register: cm_merged_create_local_users -- name: Create local users with full and minimum configuration (normal mode) +- name: Create local users with full and minimum configuration (merged state - normal mode) cisco.nd.nd_local_user: - <<: *create_local_user - register: nm_create_local_users + <<: *create_local_user_merged_state + register: nm_merged_create_local_users -- name: Asserts for local users creation tasks +- name: Asserts for local users merged state creation tasks ansible.builtin.assert: that: - - cm_create_local_users is changed - - cm_create_local_users.after | length == 3 - - cm_create_local_users.after.0.login_id == "admin" - - cm_create_local_users.after.0.first_name == "admin" - - cm_create_local_users.after.0.remote_user_authorization 
== false - - cm_create_local_users.after.0.reuse_limitation == 0 - - cm_create_local_users.after.0.security_domains | length == 1 - - cm_create_local_users.after.0.security_domains.0.name == "all" - - cm_create_local_users.after.0.security_domains.0.roles | length == 1 - - cm_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" - - cm_create_local_users.after.0.time_interval_limitation == 0 - - cm_create_local_users.after.1.email == "ansibleuser@example.com" - - cm_create_local_users.after.1.first_name == "Ansible first name" - - cm_create_local_users.after.1.last_name == "Ansible last name" - - cm_create_local_users.after.1.login_id == "ansible_local_user" - - cm_create_local_users.after.1.remote_id_claim == "ansible_remote_user" - - cm_create_local_users.after.1.remote_user_authorization == true - - cm_create_local_users.after.1.reuse_limitation == 20 - - cm_create_local_users.after.1.security_domains | length == 1 - - cm_create_local_users.after.1.security_domains.0.name == "all" - - cm_create_local_users.after.1.security_domains.0.roles | length == 2 - - cm_create_local_users.after.1.security_domains.0.roles.0 == "observer" - - cm_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" - - cm_create_local_users.after.1.time_interval_limitation == 10 - - cm_create_local_users.after.2.login_id == "ansible_local_user_2" - - cm_create_local_users.after.2.security_domains | length == 1 - - cm_create_local_users.after.2.security_domains.0.name == "all" - - cm_create_local_users.before | length == 1 - - cm_create_local_users.before.0.login_id == "admin" - - cm_create_local_users.before.0.first_name == "admin" - - cm_create_local_users.before.0.remote_user_authorization == false - - cm_create_local_users.before.0.reuse_limitation == 0 - - cm_create_local_users.before.0.security_domains | length == 1 - - cm_create_local_users.before.0.security_domains.0.name == "all" - - cm_create_local_users.before.0.security_domains.0.roles | 
length == 1 - - cm_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" - - cm_create_local_users.before.0.time_interval_limitation == 0 - - cm_create_local_users.diff == [] - - cm_create_local_users.proposed.0.email == "ansibleuser@example.com" - - cm_create_local_users.proposed.0.first_name == "Ansible first name" - - cm_create_local_users.proposed.0.last_name == "Ansible last name" - - cm_create_local_users.proposed.0.login_id == "ansible_local_user" - - cm_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" - - cm_create_local_users.proposed.0.remote_user_authorization == true - - cm_create_local_users.proposed.0.reuse_limitation == 20 - - cm_create_local_users.proposed.0.security_domains | length == 1 - - cm_create_local_users.proposed.0.security_domains.0.name == "all" - - cm_create_local_users.proposed.0.security_domains.0.roles | length == 2 - - cm_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" - - cm_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" - - cm_create_local_users.proposed.0.time_interval_limitation == 10 - - cm_create_local_users.proposed.1.login_id == "ansible_local_user_2" - - cm_create_local_users.proposed.1.security_domains | length == 1 - - cm_create_local_users.proposed.1.security_domains.0.name == "all" - - nm_create_local_users is changed - - nm_create_local_users.after.0.first_name == "admin" - - nm_create_local_users.after.0.remote_user_authorization == false - - nm_create_local_users.after.0.reuse_limitation == 0 - - nm_create_local_users.after.0.security_domains | length == 1 - - nm_create_local_users.after.0.security_domains.0.name == "all" - - nm_create_local_users.after.0.security_domains.0.roles | length == 1 - - nm_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" - - nm_create_local_users.after.0.time_interval_limitation == 0 - - nm_create_local_users.after.1.email == "ansibleuser@example.com" - - 
nm_create_local_users.after.1.first_name == "Ansible first name" - - nm_create_local_users.after.1.last_name == "Ansible last name" - - nm_create_local_users.after.1.login_id == "ansible_local_user" - - nm_create_local_users.after.1.remote_id_claim == "ansible_remote_user" - - nm_create_local_users.after.1.remote_user_authorization == true - - nm_create_local_users.after.1.reuse_limitation == 20 - - nm_create_local_users.after.1.security_domains | length == 1 - - nm_create_local_users.after.1.security_domains.0.name == "all" - - nm_create_local_users.after.1.security_domains.0.roles | length == 2 - - nm_create_local_users.after.1.security_domains.0.roles.0 == "observer" - - nm_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" - - nm_create_local_users.after.1.time_interval_limitation == 10 - - nm_create_local_users.after.2.login_id == "ansible_local_user_2" - - nm_create_local_users.after.2.security_domains | length == 1 - - nm_create_local_users.after.2.security_domains.0.name == "all" - - nm_create_local_users.before | length == 1 - - nm_create_local_users.before.0.login_id == "admin" - - nm_create_local_users.before.0.first_name == "admin" - - nm_create_local_users.before.0.remote_user_authorization == false - - nm_create_local_users.before.0.reuse_limitation == 0 - - nm_create_local_users.before.0.security_domains | length == 1 - - nm_create_local_users.before.0.security_domains.0.name == "all" - - nm_create_local_users.before.0.security_domains.0.roles | length == 1 - - nm_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" - - nm_create_local_users.before.0.time_interval_limitation == 0 - - nm_create_local_users.diff == [] - - nm_create_local_users.proposed.0.email == "ansibleuser@example.com" - - nm_create_local_users.proposed.0.first_name == "Ansible first name" - - nm_create_local_users.proposed.0.last_name == "Ansible last name" - - nm_create_local_users.proposed.0.login_id == "ansible_local_user" - - 
nm_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" - - nm_create_local_users.proposed.0.remote_user_authorization == true - - nm_create_local_users.proposed.0.reuse_limitation == 20 - - nm_create_local_users.proposed.0.security_domains | length == 1 - - nm_create_local_users.proposed.0.security_domains.0.name == "all" - - nm_create_local_users.proposed.0.security_domains.0.roles | length == 2 - - nm_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" - - nm_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" - - nm_create_local_users.proposed.0.time_interval_limitation == 10 - - nm_create_local_users.proposed.1.login_id == "ansible_local_user_2" - - nm_create_local_users.proposed.1.security_domains | length == 1 - - nm_create_local_users.proposed.1.security_domains.0.name == "all" - -# UPDATE -- name: Replace all ansible_local_user's attributes (check mode) - cisco.nd.nd_local_user: &update_first_local_user - <<: *nd_info - config: - - email: updatedansibleuser@example.com - login_id: ansible_local_user - first_name: Updated Ansible first name - last_name: Updated Ansible last name - user_password: updatedAnsibleLocalUserPassword1% - reuse_limitation: 25 - time_interval_limitation: 15 - security_domains: - - name: all - roles: super_admin - remote_id_claim: "" - remote_user_authorization: false - state: replaced - check_mode: true - register: cm_replace_local_user + - cm_merged_create_local_users is changed + - cm_merged_create_local_users.after | length == 3 + - cm_merged_create_local_users.after.0.login_id == "admin" + - cm_merged_create_local_users.after.0.first_name == "admin" + - cm_merged_create_local_users.after.0.remote_user_authorization == false + - cm_merged_create_local_users.after.0.reuse_limitation == 0 + - cm_merged_create_local_users.after.0.security_domains | length == 1 + - cm_merged_create_local_users.after.0.security_domains.0.name == "all" + - 
cm_merged_create_local_users.after.0.security_domains.0.roles | length == 1 + - cm_merged_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - cm_merged_create_local_users.after.0.time_interval_limitation == 0 + - cm_merged_create_local_users.after.1.email == "ansibleuser@example.com" + - cm_merged_create_local_users.after.1.first_name == "Ansible first name" + - cm_merged_create_local_users.after.1.last_name == "Ansible last name" + - cm_merged_create_local_users.after.1.login_id == "ansible_local_user" + - cm_merged_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - cm_merged_create_local_users.after.1.remote_user_authorization == true + - cm_merged_create_local_users.after.1.reuse_limitation == 20 + - cm_merged_create_local_users.after.1.security_domains | length == 1 + - cm_merged_create_local_users.after.1.security_domains.0.name == "all" + - cm_merged_create_local_users.after.1.security_domains.0.roles | length == 2 + - cm_merged_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - cm_merged_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - cm_merged_create_local_users.after.1.time_interval_limitation == 10 + - cm_merged_create_local_users.after.2.login_id == "ansible_local_user_2" + - cm_merged_create_local_users.after.2.security_domains | length == 1 + - cm_merged_create_local_users.after.2.security_domains.0.name == "all" + - cm_merged_create_local_users.before | length == 1 + - cm_merged_create_local_users.before.0.login_id == "admin" + - cm_merged_create_local_users.before.0.first_name == "admin" + - cm_merged_create_local_users.before.0.remote_user_authorization == false + - cm_merged_create_local_users.before.0.reuse_limitation == 0 + - cm_merged_create_local_users.before.0.security_domains | length == 1 + - cm_merged_create_local_users.before.0.security_domains.0.name == "all" + - cm_merged_create_local_users.before.0.security_domains.0.roles | length == 1 + - 
cm_merged_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - cm_merged_create_local_users.before.0.time_interval_limitation == 0 + - cm_merged_create_local_users.proposed.0.email == "ansibleuser@example.com" + - cm_merged_create_local_users.proposed.0.first_name == "Ansible first name" + - cm_merged_create_local_users.proposed.0.last_name == "Ansible last name" + - cm_merged_create_local_users.proposed.0.login_id == "ansible_local_user" + - cm_merged_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - cm_merged_create_local_users.proposed.0.remote_user_authorization == true + - cm_merged_create_local_users.proposed.0.reuse_limitation == 20 + - cm_merged_create_local_users.proposed.0.security_domains | length == 1 + - cm_merged_create_local_users.proposed.0.security_domains.0.name == "all" + - cm_merged_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - cm_merged_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - cm_merged_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - cm_merged_create_local_users.proposed.0.time_interval_limitation == 10 + - cm_merged_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - cm_merged_create_local_users.proposed.1.security_domains | length == 1 + - cm_merged_create_local_users.proposed.1.security_domains.0.name == "all" + - nm_merged_create_local_users is changed + - nm_merged_create_local_users.after.0.first_name == "admin" + - nm_merged_create_local_users.after.0.remote_user_authorization == false + - nm_merged_create_local_users.after.0.reuse_limitation == 0 + - nm_merged_create_local_users.after.0.security_domains | length == 1 + - nm_merged_create_local_users.after.0.security_domains.0.name == "all" + - nm_merged_create_local_users.after.0.security_domains.0.roles | length == 1 + - nm_merged_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - 
nm_merged_create_local_users.after.0.time_interval_limitation == 0 + - nm_merged_create_local_users.after.1.email == "ansibleuser@example.com" + - nm_merged_create_local_users.after.1.first_name == "Ansible first name" + - nm_merged_create_local_users.after.1.last_name == "Ansible last name" + - nm_merged_create_local_users.after.1.login_id == "ansible_local_user" + - nm_merged_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - nm_merged_create_local_users.after.1.remote_user_authorization == true + - nm_merged_create_local_users.after.1.reuse_limitation == 20 + - nm_merged_create_local_users.after.1.security_domains | length == 1 + - nm_merged_create_local_users.after.1.security_domains.0.name == "all" + - nm_merged_create_local_users.after.1.security_domains.0.roles | length == 2 + - nm_merged_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - nm_merged_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - nm_merged_create_local_users.after.1.time_interval_limitation == 10 + - nm_merged_create_local_users.after.2.login_id == "ansible_local_user_2" + - nm_merged_create_local_users.after.2.security_domains | length == 1 + - nm_merged_create_local_users.after.2.security_domains.0.name == "all" + - nm_merged_create_local_users.before | length == 1 + - nm_merged_create_local_users.before.0.login_id == "admin" + - nm_merged_create_local_users.before.0.first_name == "admin" + - nm_merged_create_local_users.before.0.remote_user_authorization == false + - nm_merged_create_local_users.before.0.reuse_limitation == 0 + - nm_merged_create_local_users.before.0.security_domains | length == 1 + - nm_merged_create_local_users.before.0.security_domains.0.name == "all" + - nm_merged_create_local_users.before.0.security_domains.0.roles | length == 1 + - nm_merged_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - nm_merged_create_local_users.before.0.time_interval_limitation == 0 + - 
nm_merged_create_local_users.proposed.0.email == "ansibleuser@example.com" + - nm_merged_create_local_users.proposed.0.first_name == "Ansible first name" + - nm_merged_create_local_users.proposed.0.last_name == "Ansible last name" + - nm_merged_create_local_users.proposed.0.login_id == "ansible_local_user" + - nm_merged_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - nm_merged_create_local_users.proposed.0.remote_user_authorization == true + - nm_merged_create_local_users.proposed.0.reuse_limitation == 20 + - nm_merged_create_local_users.proposed.0.security_domains | length == 1 + - nm_merged_create_local_users.proposed.0.security_domains.0.name == "all" + - nm_merged_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - nm_merged_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - nm_merged_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - nm_merged_create_local_users.proposed.0.time_interval_limitation == 10 + - nm_merged_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - nm_merged_create_local_users.proposed.1.security_domains | length == 1 + - nm_merged_create_local_users.proposed.1.security_domains.0.name == "all" -- name: Replace all ansible_local_user's attributes (normal mode) - cisco.nd.nd_local_user: - <<: *update_first_local_user - register: nm_replace_local_user - -- name: Update all ansible_local_user_2's attributes except password - cisco.nd.nd_local_user: &update_second_local_user +# MERGED STATE TESTS: UPDATE +- name: Update all ansible_local_user_2's attributes except password (merge state - check mode) + cisco.nd.nd_local_user: &update_second_local_user_merged_state <<: *nd_info config: - email: secondansibleuser@example.com @@ -206,48 +182,341 @@ remote_id_claim: ansible_remote_user_2 remote_user_authorization: true state: merged - register: nm_merge_local_user_2 + check_mode: true + register: cm_merged_update_local_user_2 -- 
name: Update all ansible_local_user_2's attributes except password again (idempotency) +- name: Update all ansible_local_user_2's attributes except password (merge state - normal mode) cisco.nd.nd_local_user: - <<: *update_second_local_user - register: nm_merge_local_user_2_again + <<: *update_second_local_user_merged_state + register: nm_merged_update_local_user_2 +- name: Update all ansible_local_user_2's attributes except password again (merge state - idempotency) + cisco.nd.nd_local_user: + <<: *update_second_local_user_merged_state + register: nm_merged_update_local_user_2_again -- name: Override local users with minimum configuration +- name: Asserts for local users update tasks + ansible.builtin.assert: + that: + - cm_merged_update_local_user_2 is changed + - cm_merged_update_local_user_2.after | length == 3 + - cm_merged_update_local_user_2.after.0.email == "secondansibleuser@example.com" + - cm_merged_update_local_user_2.after.0.first_name == "Second Ansible first name" + - cm_merged_update_local_user_2.after.0.last_name == "Second Ansible last name" + - cm_merged_update_local_user_2.after.0.login_id == "ansible_local_user_2" + - cm_merged_update_local_user_2.after.0.remote_id_claim == "ansible_remote_user_2" + - cm_merged_update_local_user_2.after.0.remote_user_authorization == true + - cm_merged_update_local_user_2.after.0.reuse_limitation == 20 + - cm_merged_update_local_user_2.after.0.security_domains | length == 1 + - cm_merged_update_local_user_2.after.0.security_domains.0.name == "all" + - cm_merged_update_local_user_2.after.0.security_domains.0.roles | length == 1 + - cm_merged_update_local_user_2.after.0.security_domains.0.roles.0 == "fabric_admin" + - cm_merged_update_local_user_2.after.0.time_interval_limitation == 10 + - cm_merged_update_local_user_2.after.1.email == "updatedansibleuser@example.com" + - cm_merged_update_local_user_2.after.1.first_name == "Updated Ansible first name" + - cm_merged_update_local_user_2.after.1.last_name == 
"Updated Ansible last name" + - cm_merged_update_local_user_2.after.1.login_id == "ansible_local_user" + - cm_merged_update_local_user_2.after.1.remote_user_authorization == false + - cm_merged_update_local_user_2.after.1.reuse_limitation == 25 + - cm_merged_update_local_user_2.after.1.security_domains | length == 1 + - cm_merged_update_local_user_2.after.1.security_domains.0.name == "all" + - cm_merged_update_local_user_2.after.1.security_domains.0.roles | length == 1 + - cm_merged_update_local_user_2.after.1.security_domains.0.roles.0 == "super_admin" + - cm_merged_update_local_user_2.after.1.time_interval_limitation == 15 + - cm_merged_update_local_user_2.after.2.login_id == "admin" + - cm_merged_update_local_user_2.after.2.first_name == "admin" + - cm_merged_update_local_user_2.after.2.remote_user_authorization == false + - cm_merged_update_local_user_2.after.2.reuse_limitation == 0 + - cm_merged_update_local_user_2.after.2.security_domains | length == 1 + - cm_merged_update_local_user_2.after.2.security_domains.0.name == "all" + - cm_merged_update_local_user_2.after.2.security_domains.0.roles | length == 1 + - cm_merged_update_local_user_2.after.2.security_domains.0.roles.0 == "super_admin" + - cm_merged_update_local_user_2.after.2.time_interval_limitation == 0 + - cm_merged_update_local_user_2.before | length == 3 + - cm_merged_update_local_user_2.before.2.first_name == "admin" + - cm_merged_update_local_user_2.before.2.remote_user_authorization == false + - cm_merged_update_local_user_2.before.2.reuse_limitation == 0 + - cm_merged_update_local_user_2.before.2.security_domains | length == 1 + - cm_merged_update_local_user_2.before.2.security_domains.0.name == "all" + - cm_merged_update_local_user_2.before.2.security_domains.0.roles | length == 1 + - cm_merged_update_local_user_2.before.2.security_domains.0.roles.0 == "super_admin" + - cm_merged_update_local_user_2.before.2.time_interval_limitation == 0 + - cm_merged_update_local_user_2.before.1.email == 
"ansibleuser@example.com" + - cm_merged_update_local_user_2.before.1.first_name == "Ansible first name" + - cm_merged_update_local_user_2.before.1.last_name == "Ansible last name" + - cm_merged_update_local_user_2.before.1.login_id == "ansible_local_user" + - cm_merged_update_local_user_2.before.1.remote_id_claim == "ansible_remote_user" + - cm_merged_update_local_user_2.before.1.remote_user_authorization == true + - cm_merged_update_local_user_2.before.1.reuse_limitation == 20 + - cm_merged_update_local_user_2.before.1.security_domains | length == 1 + - cm_merged_update_local_user_2.before.1.security_domains.0.name == "all" + - cm_merged_update_local_user_2.before.1.security_domains.0.roles | length == 2 + - cm_merged_update_local_user_2.before.1.security_domains.0.roles.0 == "observer" + - cm_merged_update_local_user_2.before.1.security_domains.0.roles.1 == "support_engineer" + - cm_merged_update_local_user_2.before.1.time_interval_limitation == 10 + - cm_merged_update_local_user_2.before.0.login_id == "ansible_local_user_2" + - cm_merged_update_local_user_2.before.0.security_domains | length == 1 + - cm_merged_update_local_user_2.before.0.security_domains.0.name == "all" + - cm_merged_update_local_user_2.proposed.0.email == "secondansibleuser@example.com" + - cm_merged_update_local_user_2.proposed.0.first_name == "Second Ansible first name" + - cm_merged_update_local_user_2.proposed.0.last_name == "Second Ansible last name" + - cm_merged_update_local_user_2.proposed.0.login_id == "ansible_local_user_2" + - cm_merged_update_local_user_2.proposed.0.remote_id_claim == "ansible_remote_user_2" + - cm_merged_update_local_user_2.proposed.0.remote_user_authorization == true + - cm_merged_update_local_user_2.proposed.0.reuse_limitation == 20 + - cm_merged_update_local_user_2.proposed.0.security_domains | length == 1 + - cm_merged_update_local_user_2.proposed.0.security_domains.0.name == "all" + - cm_merged_update_local_user_2.proposed.0.security_domains.0.roles | length 
== 1 + - cm_merged_update_local_user_2.proposed.0.security_domains.0.roles.0 == "fabric_admin" + - cm_merged_update_local_user_2.proposed.0.time_interval_limitation == 10 + - nm_merged_update_local_user_2 is changed + - nm_merged_update_local_user_2.after | length == 3 + - nm_merged_update_local_user_2.after.0.email == "secondansibleuser@example.com" + - nm_merged_update_local_user_2.after.0.first_name == "Second Ansible first name" + - nm_merged_update_local_user_2.after.0.last_name == "Second Ansible last name" + - nm_merged_update_local_user_2.after.0.login_id == "ansible_local_user_2" + - nm_merged_update_local_user_2.after.0.remote_id_claim == "ansible_remote_user_2" + - nm_merged_update_local_user_2.after.0.remote_user_authorization == true + - nm_merged_update_local_user_2.after.0.reuse_limitation == 20 + - nm_merged_update_local_user_2.after.0.security_domains | length == 1 + - nm_merged_update_local_user_2.after.0.security_domains.0.name == "all" + - nm_merged_update_local_user_2.after.0.security_domains.0.roles | length == 1 + - nm_merged_update_local_user_2.after.0.security_domains.0.roles.0 == "fabric_admin" + - nm_merged_update_local_user_2.after.0.time_interval_limitation == 10 + - nm_merged_update_local_user_2.after.1.email == "updatedansibleuser@example.com" + - nm_merged_update_local_user_2.after.1.first_name == "Updated Ansible first name" + - nm_merged_update_local_user_2.after.1.last_name == "Updated Ansible last name" + - nm_merged_update_local_user_2.after.1.login_id == "ansible_local_user" + - nm_merged_update_local_user_2.after.1.remote_user_authorization == false + - nm_merged_update_local_user_2.after.1.reuse_limitation == 25 + - nm_merged_update_local_user_2.after.1.security_domains | length == 1 + - nm_merged_update_local_user_2.after.1.security_domains.0.name == "all" + - nm_merged_update_local_user_2.after.1.security_domains.0.roles | length == 1 + - nm_merged_update_local_user_2.after.1.security_domains.0.roles.0 == "super_admin" + - 
nm_merged_update_local_user_2.after.1.time_interval_limitation == 15 + - nm_merged_update_local_user_2.after.2.login_id == "admin" + - nm_merged_update_local_user_2.after.2.first_name == "admin" + - nm_merged_update_local_user_2.after.2.remote_user_authorization == false + - nm_merged_update_local_user_2.after.2.reuse_limitation == 0 + - nm_merged_update_local_user_2.after.2.security_domains | length == 1 + - nm_merged_update_local_user_2.after.2.security_domains.0.name == "all" + - nm_merged_update_local_user_2.after.2.security_domains.0.roles | length == 1 + - nm_merged_update_local_user_2.after.2.security_domains.0.roles.0 == "super_admin" + - nm_merged_update_local_user_2.after.2.time_interval_limitation == 0 + - nm_merged_update_local_user_2.before | length == 3 + - nm_merged_update_local_user_2.before.2.first_name == "admin" + - nm_merged_update_local_user_2.before.2.remote_user_authorization == false + - nm_merged_update_local_user_2.before.2.reuse_limitation == 0 + - nm_merged_update_local_user_2.before.2.security_domains | length == 1 + - nm_merged_update_local_user_2.before.2.security_domains.0.name == "all" + - nm_merged_update_local_user_2.before.2.security_domains.0.roles | length == 1 + - nm_merged_update_local_user_2.before.2.security_domains.0.roles.0 == "super_admin" + - nm_merged_update_local_user_2.before.2.time_interval_limitation == 0 + - nm_merged_update_local_user_2.before.1.email == "ansibleuser@example.com" + - nm_merged_update_local_user_2.before.1.first_name == "Ansible first name" + - nm_merged_update_local_user_2.before.1.last_name == "Ansible last name" + - nm_merged_update_local_user_2.before.1.login_id == "ansible_local_user" + - nm_merged_update_local_user_2.before.1.remote_id_claim == "ansible_remote_user" + - nm_merged_update_local_user_2.before.1.remote_user_authorization == true + - nm_merged_update_local_user_2.before.1.reuse_limitation == 20 + - nm_merged_update_local_user_2.before.1.security_domains | length == 1 + - 
nm_merged_update_local_user_2.before.1.security_domains.0.name == "all" + - nm_merged_update_local_user_2.before.1.security_domains.0.roles | length == 2 + - nm_merged_update_local_user_2.before.1.security_domains.0.roles.0 == "observer" + - nm_merged_update_local_user_2.before.1.security_domains.0.roles.1 == "support_engineer" + - nm_merged_update_local_user_2.before.1.time_interval_limitation == 10 + - nm_merged_update_local_user_2.before.0.login_id == "ansible_local_user_2" + - nm_merged_update_local_user_2.before.0.security_domains | length == 1 + - nm_merged_update_local_user_2.before.0.security_domains.0.name == "all" + - nm_merged_update_local_user_2.proposed.0.email == "secondansibleuser@example.com" + - nm_merged_update_local_user_2.proposed.0.first_name == "Second Ansible first name" + - nm_merged_update_local_user_2.proposed.0.last_name == "Second Ansible last name" + - nm_merged_update_local_user_2.proposed.0.login_id == "ansible_local_user_2" + - nm_merged_update_local_user_2.proposed.0.remote_id_claim == "ansible_remote_user_2" + - nm_merged_update_local_user_2.proposed.0.remote_user_authorization == true + - nm_merged_update_local_user_2.proposed.0.reuse_limitation == 20 + - nm_merged_update_local_user_2.proposed.0.security_domains | length == 1 + - nm_merged_update_local_user_2.proposed.0.security_domains.0.name == "all" + - nm_merged_update_local_user_2.proposed.0.security_domains.0.roles | length == 1 + - nm_merged_update_local_user_2.proposed.0.security_domains.0.roles.0 == "fabric_admin" + - nm_merged_update_local_user_2.proposed.0.time_interval_limitation == 10 + - nm_merged_update_local_user_2_again is not changed + - nm_merged_update_local_user_2_again.after == nm_merged_update_local_user_2.after + - nm_merged_update_local_user_2_again.proposed == nm_merged_update_local_user_2.proposed + +- name: Ensure local users do not exist for next tests cisco.nd.nd_local_user: + <<: *clean_all_local_users + +# --- REPLACED STATE TESTS --- + +# REPLACED 
STATE TESTS: CREATE +- name: Create local users with full and minimum configuration (replaced state - check mode) + cisco.nd.nd_local_user: &create_local_user_replaced_state <<: *nd_info config: - - email: overrideansibleuser@example.com + - email: ansibleuser@example.com login_id: ansible_local_user - first_name: Overridden Ansible first name - last_name: Overridden Ansible last name - user_password: overideansibleLocalUserPassword1% - reuse_limitation: 15 - time_interval_limitation: 5 + first_name: Ansible first name + last_name: Ansible last name + user_password: ansibleLocalUserPassword1%Test + reuse_limitation: 20 + time_interval_limitation: 10 security_domains: - name: all roles: - observer + - support_engineer remote_id_claim: ansible_remote_user remote_user_authorization: true - - login_id: admin - first_name: admin - remote_user_authorization: false - reuse_limitation: 0 - time_interval_limitation: 0 + - login_id: ansible_local_user_2 + user_password: ansibleLocalUser2Password1%Test security_domains: - name: all - roles: - - super_admin - - login_id: ansible_local_user_3 - user_password: ansibleLocalUser3Password1%Test + state: replaced + check_mode: true + register: cm_replaced_create_local_users + +- name: Create local users with full and minimum configuration (replaced state - normal mode) + cisco.nd.nd_local_user: + <<: *create_local_user_replaced_state + register: nm_replaced_create_local_users + +- name: Asserts for local users replaced state creation tasks + ansible.builtin.assert: + that: + - cm_replaced_create_local_users is changed + - cm_replaced_create_local_users.after | length == 3 + - cm_replaced_create_local_users.after.0.login_id == "admin" + - cm_replaced_create_local_users.after.0.first_name == "admin" + - cm_replaced_create_local_users.after.0.remote_user_authorization == false + - cm_replaced_create_local_users.after.0.reuse_limitation == 0 + - cm_replaced_create_local_users.after.0.security_domains | length == 1 + - 
cm_replaced_create_local_users.after.0.security_domains.0.name == "all" + - cm_replaced_create_local_users.after.0.security_domains.0.roles | length == 1 + - cm_replaced_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - cm_replaced_create_local_users.after.0.time_interval_limitation == 0 + - cm_replaced_create_local_users.after.1.email == "ansibleuser@example.com" + - cm_replaced_create_local_users.after.1.first_name == "Ansible first name" + - cm_replaced_create_local_users.after.1.last_name == "Ansible last name" + - cm_replaced_create_local_users.after.1.login_id == "ansible_local_user" + - cm_replaced_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - cm_replaced_create_local_users.after.1.remote_user_authorization == true + - cm_replaced_create_local_users.after.1.reuse_limitation == 20 + - cm_replaced_create_local_users.after.1.security_domains | length == 1 + - cm_replaced_create_local_users.after.1.security_domains.0.name == "all" + - cm_replaced_create_local_users.after.1.security_domains.0.roles | length == 2 + - cm_replaced_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - cm_replaced_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - cm_replaced_create_local_users.after.1.time_interval_limitation == 10 + - cm_replaced_create_local_users.after.2.login_id == "ansible_local_user_2" + - cm_replaced_create_local_users.after.2.security_domains | length == 1 + - cm_replaced_create_local_users.after.2.security_domains.0.name == "all" + - cm_replaced_create_local_users.before | length == 1 + - cm_replaced_create_local_users.before.0.login_id == "admin" + - cm_replaced_create_local_users.before.0.first_name == "admin" + - cm_replaced_create_local_users.before.0.remote_user_authorization == false + - cm_replaced_create_local_users.before.0.reuse_limitation == 0 + - cm_replaced_create_local_users.before.0.security_domains | length == 1 + - 
cm_replaced_create_local_users.before.0.security_domains.0.name == "all" + - cm_replaced_create_local_users.before.0.security_domains.0.roles | length == 1 + - cm_replaced_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - cm_replaced_create_local_users.before.0.time_interval_limitation == 0 + - cm_replaced_create_local_users.proposed.0.email == "ansibleuser@example.com" + - cm_replaced_create_local_users.proposed.0.first_name == "Ansible first name" + - cm_replaced_create_local_users.proposed.0.last_name == "Ansible last name" + - cm_replaced_create_local_users.proposed.0.login_id == "ansible_local_user" + - cm_replaced_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - cm_replaced_create_local_users.proposed.0.remote_user_authorization == true + - cm_replaced_create_local_users.proposed.0.reuse_limitation == 20 + - cm_replaced_create_local_users.proposed.0.security_domains | length == 1 + - cm_replaced_create_local_users.proposed.0.security_domains.0.name == "all" + - cm_replaced_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - cm_replaced_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - cm_replaced_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - cm_replaced_create_local_users.proposed.0.time_interval_limitation == 10 + - cm_replaced_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - cm_replaced_create_local_users.proposed.1.security_domains | length == 1 + - cm_replaced_create_local_users.proposed.1.security_domains.0.name == "all" + - nm_replaced_create_local_users is changed + - nm_replaced_create_local_users.after.0.first_name == "admin" + - nm_replaced_create_local_users.after.0.remote_user_authorization == false + - nm_replaced_create_local_users.after.0.reuse_limitation == 0 + - nm_replaced_create_local_users.after.0.security_domains | length == 1 + - 
nm_replaced_create_local_users.after.0.security_domains.0.name == "all" + - nm_replaced_create_local_users.after.0.security_domains.0.roles | length == 1 + - nm_replaced_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - nm_replaced_create_local_users.after.0.time_interval_limitation == 0 + - nm_replaced_create_local_users.after.1.email == "ansibleuser@example.com" + - nm_replaced_create_local_users.after.1.first_name == "Ansible first name" + - nm_replaced_create_local_users.after.1.last_name == "Ansible last name" + - nm_replaced_create_local_users.after.1.login_id == "ansible_local_user" + - nm_replaced_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - nm_replaced_create_local_users.after.1.remote_user_authorization == true + - nm_replaced_create_local_users.after.1.reuse_limitation == 20 + - nm_replaced_create_local_users.after.1.security_domains | length == 1 + - nm_replaced_create_local_users.after.1.security_domains.0.name == "all" + - nm_replaced_create_local_users.after.1.security_domains.0.roles | length == 2 + - nm_replaced_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - nm_replaced_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - nm_replaced_create_local_users.after.1.time_interval_limitation == 10 + - nm_replaced_create_local_users.after.2.login_id == "ansible_local_user_2" + - nm_replaced_create_local_users.after.2.security_domains | length == 1 + - nm_replaced_create_local_users.after.2.security_domains.0.name == "all" + - nm_replaced_create_local_users.before | length == 1 + - nm_replaced_create_local_users.before.0.login_id == "admin" + - nm_replaced_create_local_users.before.0.first_name == "admin" + - nm_replaced_create_local_users.before.0.remote_user_authorization == false + - nm_replaced_create_local_users.before.0.reuse_limitation == 0 + - nm_replaced_create_local_users.before.0.security_domains | length == 1 + - 
nm_replaced_create_local_users.before.0.security_domains.0.name == "all" + - nm_replaced_create_local_users.before.0.security_domains.0.roles | length == 1 + - nm_replaced_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - nm_replaced_create_local_users.before.0.time_interval_limitation == 0 + - nm_replaced_create_local_users.proposed.0.email == "ansibleuser@example.com" + - nm_replaced_create_local_users.proposed.0.first_name == "Ansible first name" + - nm_replaced_create_local_users.proposed.0.last_name == "Ansible last name" + - nm_replaced_create_local_users.proposed.0.login_id == "ansible_local_user" + - nm_replaced_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - nm_replaced_create_local_users.proposed.0.remote_user_authorization == true + - nm_replaced_create_local_users.proposed.0.reuse_limitation == 20 + - nm_replaced_create_local_users.proposed.0.security_domains | length == 1 + - nm_replaced_create_local_users.proposed.0.security_domains.0.name == "all" + - nm_replaced_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - nm_replaced_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - nm_replaced_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - nm_replaced_create_local_users.proposed.0.time_interval_limitation == 10 + - nm_replaced_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - nm_replaced_create_local_users.proposed.1.security_domains | length == 1 + - nm_replaced_create_local_users.proposed.1.security_domains.0.name == "all" + +# REPLACED STATE TESTS: UPDATE +- name: Replace all ansible_local_user's attributes (replaced state - check mode) + cisco.nd.nd_local_user: &update_first_local_user_replaced_state + <<: *nd_info + config: + - email: updatedansibleuser@example.com + login_id: ansible_local_user + first_name: Updated Ansible first name + last_name: Updated Ansible last name + user_password: 
updatedAnsibleLocalUserPassword1% + reuse_limitation: 25 + time_interval_limitation: 15 security_domains: - name: all - state: overridden - register: nm_override_local_users + roles: super_admin + remote_id_claim: "" + remote_user_authorization: false + state: replaced + check_mode: true + register: cm_replace_local_user -- name: Asserts for local users update tasks +- name: Replace all ansible_local_user's attributes (replaced - normal mode) + cisco.nd.nd_local_user: + <<: *update_first_local_user_replaced_state + register: nm_replace_local_user + +- name: Asserts for local users replaced state update tasks ansible.builtin.assert: that: - cm_replace_local_user is changed @@ -301,7 +570,6 @@ - cm_replace_local_user.before.0.login_id == "ansible_local_user_2" - cm_replace_local_user.before.0.security_domains | length == 1 - cm_replace_local_user.before.0.security_domains.0.name == "all" - - cm_replace_local_user.diff == [] - cm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com" - cm_replace_local_user.proposed.0.first_name == "Updated Ansible first name" - cm_replace_local_user.proposed.0.last_name == "Updated Ansible last name" @@ -365,7 +633,6 @@ - nm_replace_local_user.before.0.login_id == "ansible_local_user_2" - nm_replace_local_user.before.0.security_domains | length == 1 - nm_replace_local_user.before.0.security_domains.0.name == "all" - - nm_replace_local_user.diff == [] - nm_replace_local_user.proposed.0.email == "updatedansibleuser@example.com" - nm_replace_local_user.proposed.0.first_name == "Updated Ansible first name" - nm_replace_local_user.proposed.0.last_name == "Updated Ansible last name" @@ -378,164 +645,368 @@ - nm_replace_local_user.proposed.0.security_domains.0.roles | length == 1 - nm_replace_local_user.proposed.0.security_domains.0.roles.0 == "super_admin" - nm_replace_local_user.proposed.0.time_interval_limitation == 15 - - nm_merge_local_user_2 is changed - - nm_merge_local_user_2.after | length == 3 - - 
nm_merge_local_user_2.after.0.email == "secondansibleuser@example.com" - - nm_merge_local_user_2.after.0.first_name == "Second Ansible first name" - - nm_merge_local_user_2.after.0.last_name == "Second Ansible last name" - - nm_merge_local_user_2.after.0.login_id == "ansible_local_user_2" - - nm_merge_local_user_2.after.0.remote_id_claim == "ansible_remote_user_2" - - nm_merge_local_user_2.after.0.remote_user_authorization == true - - nm_merge_local_user_2.after.0.reuse_limitation == 20 - - nm_merge_local_user_2.after.0.security_domains | length == 1 - - nm_merge_local_user_2.after.0.security_domains.0.name == "all" - - nm_merge_local_user_2.after.0.security_domains.0.roles | length == 1 - - nm_merge_local_user_2.after.0.security_domains.0.roles.0 == "fabric_admin" - - nm_merge_local_user_2.after.0.time_interval_limitation == 10 - - nm_merge_local_user_2.after.1.email == "updatedansibleuser@example.com" - - nm_merge_local_user_2.after.1.first_name == "Updated Ansible first name" - - nm_merge_local_user_2.after.1.last_name == "Updated Ansible last name" - - nm_merge_local_user_2.after.1.login_id == "ansible_local_user" - - nm_merge_local_user_2.after.1.remote_user_authorization == false - - nm_merge_local_user_2.after.1.reuse_limitation == 25 - - nm_merge_local_user_2.after.1.security_domains | length == 1 - - nm_merge_local_user_2.after.1.security_domains.0.name == "all" - - nm_merge_local_user_2.after.1.security_domains.0.roles | length == 1 - - nm_merge_local_user_2.after.1.security_domains.0.roles.0 == "super_admin" - - nm_merge_local_user_2.after.1.time_interval_limitation == 15 - - nm_merge_local_user_2.after.2.login_id == "admin" - - nm_merge_local_user_2.after.2.first_name == "admin" - - nm_merge_local_user_2.after.2.remote_user_authorization == false - - nm_merge_local_user_2.after.2.reuse_limitation == 0 - - nm_merge_local_user_2.after.2.security_domains | length == 1 - - nm_merge_local_user_2.after.2.security_domains.0.name == "all" - - 
nm_merge_local_user_2.after.2.security_domains.0.roles | length == 1 - - nm_merge_local_user_2.after.2.security_domains.0.roles.0 == "super_admin" - - nm_merge_local_user_2.after.2.time_interval_limitation == 0 - - nm_merge_local_user_2.before | length == 3 - - nm_merge_local_user_2.before.2.first_name == "admin" - - nm_merge_local_user_2.before.2.remote_user_authorization == false - - nm_merge_local_user_2.before.2.reuse_limitation == 0 - - nm_merge_local_user_2.before.2.security_domains | length == 1 - - nm_merge_local_user_2.before.2.security_domains.0.name == "all" - - nm_merge_local_user_2.before.2.security_domains.0.roles | length == 1 - - nm_merge_local_user_2.before.2.security_domains.0.roles.0 == "super_admin" - - nm_merge_local_user_2.before.2.time_interval_limitation == 0 - - nm_merge_local_user_2.before.1.email == "updatedansibleuser@example.com" - - nm_merge_local_user_2.before.1.first_name == "Updated Ansible first name" - - nm_merge_local_user_2.before.1.last_name == "Updated Ansible last name" - - nm_merge_local_user_2.before.1.login_id == "ansible_local_user" - - nm_merge_local_user_2.before.1.remote_user_authorization == false - - nm_merge_local_user_2.before.1.reuse_limitation == 25 - - nm_merge_local_user_2.before.1.security_domains | length == 1 - - nm_merge_local_user_2.before.1.security_domains.0.name == "all" - - nm_merge_local_user_2.before.1.security_domains.0.roles | length == 1 - - nm_merge_local_user_2.before.1.security_domains.0.roles.0 == "super_admin" - - nm_merge_local_user_2.before.1.time_interval_limitation == 15 - - nm_merge_local_user_2.before.0.login_id == "ansible_local_user_2" - - nm_merge_local_user_2.before.0.security_domains | length == 1 - - nm_merge_local_user_2.before.0.security_domains.0.name == "all" - - nm_merge_local_user_2.diff == [] - - nm_merge_local_user_2.proposed.0.email == "secondansibleuser@example.com" - - nm_merge_local_user_2.proposed.0.first_name == "Second Ansible first name" - - 
nm_merge_local_user_2.proposed.0.last_name == "Second Ansible last name" - - nm_merge_local_user_2.proposed.0.login_id == "ansible_local_user_2" - - nm_merge_local_user_2.proposed.0.remote_id_claim == "ansible_remote_user_2" - - nm_merge_local_user_2.proposed.0.remote_user_authorization == true - - nm_merge_local_user_2.proposed.0.reuse_limitation == 20 - - nm_merge_local_user_2.proposed.0.security_domains | length == 1 - - nm_merge_local_user_2.proposed.0.security_domains.0.name == "all" - - nm_merge_local_user_2.proposed.0.security_domains.0.roles | length == 1 - - nm_merge_local_user_2.proposed.0.security_domains.0.roles.0 == "fabric_admin" - - nm_merge_local_user_2.proposed.0.time_interval_limitation == 10 - - nm_merge_local_user_2_again is not changed - - nm_merge_local_user_2_again.after == nm_merge_local_user_2.after - - nm_merge_local_user_2_again.diff == [] - - nm_merge_local_user_2_again.proposed == nm_merge_local_user_2.proposed - - nm_override_local_users is changed - - nm_override_local_users.after | length == 3 - - nm_override_local_users.after.0.email == "overrideansibleuser@example.com" - - nm_override_local_users.after.0.first_name == "Overridden Ansible first name" - - nm_override_local_users.after.0.last_name == "Overridden Ansible last name" - - nm_override_local_users.after.0.login_id == "ansible_local_user" - - nm_override_local_users.after.0.remote_id_claim == "ansible_remote_user" - - nm_override_local_users.after.0.remote_user_authorization == true - - nm_override_local_users.after.0.reuse_limitation == 15 - - nm_override_local_users.after.0.security_domains | length == 1 - - nm_override_local_users.after.0.security_domains.0.name == "all" - - nm_override_local_users.after.0.security_domains.0.roles | length == 1 - - nm_override_local_users.after.0.security_domains.0.roles.0 == "observer" - - nm_override_local_users.after.0.time_interval_limitation == 5 - - nm_override_local_users.after.1.login_id == "admin" - - 
nm_override_local_users.after.1.first_name == "admin" - - nm_override_local_users.after.1.remote_user_authorization == false - - nm_override_local_users.after.1.reuse_limitation == 0 - - nm_override_local_users.after.1.security_domains | length == 1 - - nm_override_local_users.after.1.security_domains.0.name == "all" - - nm_override_local_users.after.1.security_domains.0.roles | length == 1 - - nm_override_local_users.after.1.security_domains.0.roles.0 == "super_admin" - - nm_override_local_users.after.1.time_interval_limitation == 0 - - nm_override_local_users.after.2.login_id == "ansible_local_user_3" - - nm_override_local_users.after.2.security_domains.0.name == "all" - - nm_override_local_users.before | length == 3 - - nm_override_local_users.before.2.first_name == "admin" - - nm_override_local_users.before.2.remote_user_authorization == false - - nm_override_local_users.before.2.reuse_limitation == 0 - - nm_override_local_users.before.2.security_domains | length == 1 - - nm_override_local_users.before.2.security_domains.0.name == "all" - - nm_override_local_users.before.2.security_domains.0.roles | length == 1 - - nm_override_local_users.before.2.security_domains.0.roles.0 == "super_admin" - - nm_override_local_users.before.2.time_interval_limitation == 0 - - nm_override_local_users.before.1.email == "updatedansibleuser@example.com" - - nm_override_local_users.before.1.first_name == "Updated Ansible first name" - - nm_override_local_users.before.1.last_name == "Updated Ansible last name" - - nm_override_local_users.before.1.login_id == "ansible_local_user" - - nm_override_local_users.before.1.remote_user_authorization == false - - nm_override_local_users.before.1.reuse_limitation == 25 - - nm_override_local_users.before.1.security_domains | length == 1 - - nm_override_local_users.before.1.security_domains.0.name == "all" - - nm_override_local_users.before.1.security_domains.0.roles | length == 1 - - nm_override_local_users.before.1.security_domains.0.roles.0 
== "super_admin" - - nm_override_local_users.before.1.time_interval_limitation == 15 - - nm_override_local_users.before.0.email == "secondansibleuser@example.com" - - nm_override_local_users.before.0.first_name == "Second Ansible first name" - - nm_override_local_users.before.0.last_name == "Second Ansible last name" - - nm_override_local_users.before.0.login_id == "ansible_local_user_2" - - nm_override_local_users.before.0.remote_id_claim == "ansible_remote_user_2" - - nm_override_local_users.before.0.remote_user_authorization == true - - nm_override_local_users.before.0.reuse_limitation == 20 - - nm_override_local_users.before.0.security_domains | length == 1 - - nm_override_local_users.before.0.security_domains.0.name == "all" - - nm_override_local_users.before.0.security_domains.0.roles | length == 1 - - nm_override_local_users.before.0.security_domains.0.roles.0 == "fabric_admin" - - nm_override_local_users.before.0.time_interval_limitation == 10 - - nm_override_local_users.diff == [] - - nm_override_local_users.proposed.0.email == "overrideansibleuser@example.com" - - nm_override_local_users.proposed.0.first_name == "Overridden Ansible first name" - - nm_override_local_users.proposed.0.last_name == "Overridden Ansible last name" - - nm_override_local_users.proposed.0.login_id == "ansible_local_user" - - nm_override_local_users.proposed.0.remote_id_claim == "ansible_remote_user" - - nm_override_local_users.proposed.0.remote_user_authorization == true - - nm_override_local_users.proposed.0.reuse_limitation == 15 - - nm_override_local_users.proposed.0.security_domains | length == 1 - - nm_override_local_users.proposed.0.security_domains.0.name == "all" - - nm_override_local_users.proposed.0.security_domains.0.roles | length == 1 - - nm_override_local_users.proposed.0.security_domains.0.roles.0 == "observer" - - nm_override_local_users.proposed.0.time_interval_limitation == 5 - - nm_override_local_users.proposed.1.login_id == "admin" - - 
nm_override_local_users.proposed.1.first_name == "admin" - - nm_override_local_users.proposed.1.remote_user_authorization == false - - nm_override_local_users.proposed.1.reuse_limitation == 0 - - nm_override_local_users.proposed.1.security_domains | length == 1 - - nm_override_local_users.proposed.1.security_domains.0.name == "all" - - nm_override_local_users.proposed.1.security_domains.0.roles | length == 1 - - nm_override_local_users.proposed.1.security_domains.0.roles.0 == "super_admin" - - nm_override_local_users.proposed.1.time_interval_limitation == 0 - - nm_override_local_users.proposed.2.login_id == "ansible_local_user_3" - - nm_override_local_users.proposed.2.security_domains.0.name == "all" - - -# DELETE + +- name: Ensure local users do not exist for next tests + cisco.nd.nd_local_user: + <<: *clean_all_local_users + +# --- OVERRIDDEN STATE TESTS --- + +# OVERRIDDEN STATE TESTS: CREATE +- name: Create local users with full and minimum configuration (overridden state - check mode) + cisco.nd.nd_local_user: &create_local_user_overridden_state + <<: *nd_info + config: + - login_id: admin + first_name: admin + remote_user_authorization: false + reuse_limitation: 0 + time_interval_limitation: 0 + security_domains: + - name: all + roles: + - super_admin + - email: ansibleuser@example.com + login_id: ansible_local_user + first_name: Ansible first name + last_name: Ansible last name + user_password: ansibleLocalUserPassword1%Test + reuse_limitation: 20 + time_interval_limitation: 10 + security_domains: + - name: all + roles: + - observer + - support_engineer + remote_id_claim: ansible_remote_user + remote_user_authorization: true + - login_id: ansible_local_user_2 + user_password: ansibleLocalUser2Password1%Test + security_domains: + - name: all + state: merged + check_mode: true + register: cm_overridden_create_local_users + +- name: Create local users with full and minimum configuration (overridden state - normal mode) + cisco.nd.nd_local_user: + <<: 
*create_local_user_overridden_state + register: nm_overridden_create_local_users + +- name: Asserts for local users overridden state creation tasks + ansible.builtin.assert: + that: + - cm_overridden_create_local_users is changed + - cm_overridden_create_local_users.after | length == 3 + - cm_overridden_create_local_users.after.0.login_id == "admin" + - cm_overridden_create_local_users.after.0.first_name == "admin" + - cm_overridden_create_local_users.after.0.remote_user_authorization == false + - cm_overridden_create_local_users.after.0.reuse_limitation == 0 + - cm_overridden_create_local_users.after.0.security_domains | length == 1 + - cm_overridden_create_local_users.after.0.security_domains.0.name == "all" + - cm_overridden_create_local_users.after.0.security_domains.0.roles | length == 1 + - cm_overridden_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - cm_overridden_create_local_users.after.0.time_interval_limitation == 0 + - cm_overridden_create_local_users.after.1.email == "ansibleuser@example.com" + - cm_overridden_create_local_users.after.1.first_name == "Ansible first name" + - cm_overridden_create_local_users.after.1.last_name == "Ansible last name" + - cm_overridden_create_local_users.after.1.login_id == "ansible_local_user" + - cm_overridden_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - cm_overridden_create_local_users.after.1.remote_user_authorization == true + - cm_overridden_create_local_users.after.1.reuse_limitation == 20 + - cm_overridden_create_local_users.after.1.security_domains | length == 1 + - cm_overridden_create_local_users.after.1.security_domains.0.name == "all" + - cm_overridden_create_local_users.after.1.security_domains.0.roles | length == 2 + - cm_overridden_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - cm_overridden_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - 
cm_overridden_create_local_users.after.1.time_interval_limitation == 10 + - cm_overridden_create_local_users.after.2.login_id == "ansible_local_user_2" + - cm_overridden_create_local_users.after.2.security_domains | length == 1 + - cm_overridden_create_local_users.after.2.security_domains.0.name == "all" + - cm_overridden_create_local_users.before | length == 1 + - cm_overridden_create_local_users.before.0.login_id == "admin" + - cm_overridden_create_local_users.before.0.first_name == "admin" + - cm_overridden_create_local_users.before.0.remote_user_authorization == false + - cm_overridden_create_local_users.before.0.reuse_limitation == 0 + - cm_overridden_create_local_users.before.0.security_domains | length == 1 + - cm_overridden_create_local_users.before.0.security_domains.0.name == "all" + - cm_overridden_create_local_users.before.0.security_domains.0.roles | length == 1 + - cm_overridden_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - cm_overridden_create_local_users.before.0.time_interval_limitation == 0 + - cm_overridden_create_local_users.proposed.0.email == "ansibleuser@example.com" + - cm_overridden_create_local_users.proposed.0.first_name == "Ansible first name" + - cm_overridden_create_local_users.proposed.0.last_name == "Ansible last name" + - cm_overridden_create_local_users.proposed.0.login_id == "ansible_local_user" + - cm_overridden_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - cm_overridden_create_local_users.proposed.0.remote_user_authorization == true + - cm_overridden_create_local_users.proposed.0.reuse_limitation == 20 + - cm_overridden_create_local_users.proposed.0.security_domains | length == 1 + - cm_overridden_create_local_users.proposed.0.security_domains.0.name == "all" + - cm_overridden_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - cm_overridden_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - 
cm_overridden_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - cm_overridden_create_local_users.proposed.0.time_interval_limitation == 10 + - cm_overridden_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - cm_overridden_create_local_users.proposed.1.security_domains | length == 1 + - cm_overridden_create_local_users.proposed.1.security_domains.0.name == "all" + - nm_overridden_create_local_users is changed + - nm_overridden_create_local_users.after.0.first_name == "admin" + - nm_overridden_create_local_users.after.0.remote_user_authorization == false + - nm_overridden_create_local_users.after.0.reuse_limitation == 0 + - nm_overridden_create_local_users.after.0.security_domains | length == 1 + - nm_overridden_create_local_users.after.0.security_domains.0.name == "all" + - nm_overridden_create_local_users.after.0.security_domains.0.roles | length == 1 + - nm_overridden_create_local_users.after.0.security_domains.0.roles.0 == "super_admin" + - nm_overridden_create_local_users.after.0.time_interval_limitation == 0 + - nm_overridden_create_local_users.after.1.email == "ansibleuser@example.com" + - nm_overridden_create_local_users.after.1.first_name == "Ansible first name" + - nm_overridden_create_local_users.after.1.last_name == "Ansible last name" + - nm_overridden_create_local_users.after.1.login_id == "ansible_local_user" + - nm_overridden_create_local_users.after.1.remote_id_claim == "ansible_remote_user" + - nm_overridden_create_local_users.after.1.remote_user_authorization == true + - nm_overridden_create_local_users.after.1.reuse_limitation == 20 + - nm_overridden_create_local_users.after.1.security_domains | length == 1 + - nm_overridden_create_local_users.after.1.security_domains.0.name == "all" + - nm_overridden_create_local_users.after.1.security_domains.0.roles | length == 2 + - nm_overridden_create_local_users.after.1.security_domains.0.roles.0 == "observer" + - 
nm_overridden_create_local_users.after.1.security_domains.0.roles.1 == "support_engineer" + - nm_overridden_create_local_users.after.1.time_interval_limitation == 10 + - nm_overridden_create_local_users.after.2.login_id == "ansible_local_user_2" + - nm_overridden_create_local_users.after.2.security_domains | length == 1 + - nm_overridden_create_local_users.after.2.security_domains.0.name == "all" + - nm_overridden_create_local_users.before | length == 1 + - nm_overridden_create_local_users.before.0.login_id == "admin" + - nm_overridden_create_local_users.before.0.first_name == "admin" + - nm_overridden_create_local_users.before.0.remote_user_authorization == false + - nm_overridden_create_local_users.before.0.reuse_limitation == 0 + - nm_overridden_create_local_users.before.0.security_domains | length == 1 + - nm_overridden_create_local_users.before.0.security_domains.0.name == "all" + - nm_overridden_create_local_users.before.0.security_domains.0.roles | length == 1 + - nm_overridden_create_local_users.before.0.security_domains.0.roles.0 == "super_admin" + - nm_overridden_create_local_users.before.0.time_interval_limitation == 0 + - nm_overridden_create_local_users.proposed.0.email == "ansibleuser@example.com" + - nm_overridden_create_local_users.proposed.0.first_name == "Ansible first name" + - nm_overridden_create_local_users.proposed.0.last_name == "Ansible last name" + - nm_overridden_create_local_users.proposed.0.login_id == "ansible_local_user" + - nm_overridden_create_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - nm_overridden_create_local_users.proposed.0.remote_user_authorization == true + - nm_overridden_create_local_users.proposed.0.reuse_limitation == 20 + - nm_overridden_create_local_users.proposed.0.security_domains | length == 1 + - nm_overridden_create_local_users.proposed.0.security_domains.0.name == "all" + - nm_overridden_create_local_users.proposed.0.security_domains.0.roles | length == 2 + - 
nm_overridden_create_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - nm_overridden_create_local_users.proposed.0.security_domains.0.roles.1 == "support_engineer" + - nm_overridden_create_local_users.proposed.0.time_interval_limitation == 10 + - nm_overridden_create_local_users.proposed.1.login_id == "ansible_local_user_2" + - nm_overridden_create_local_users.proposed.1.security_domains | length == 1 + - nm_overridden_create_local_users.proposed.1.security_domains.0.name == "all" + +# OVERRIDDEN STATE TESTS: UPDATE +- name: Override local users with minimum configuration (overridden state - check mode) + cisco.nd.nd_local_user: &update_all_local_users_overridden_state + <<: *nd_info + config: + - email: overrideansibleuser@example.com + login_id: ansible_local_user + first_name: Overridden Ansible first name + last_name: Overridden Ansible last name + user_password: overideansibleLocalUserPassword1% + reuse_limitation: 15 + time_interval_limitation: 5 + security_domains: + - name: all + roles: + - observer + remote_id_claim: ansible_remote_user + remote_user_authorization: true + - login_id: admin + first_name: admin + remote_user_authorization: false + reuse_limitation: 0 + time_interval_limitation: 0 + security_domains: + - name: all + roles: + - super_admin + - login_id: ansible_local_user_3 + user_password: ansibleLocalUser3Password1%Test + security_domains: + - name: all + state: overridden + check_mode: true + register: cm_override_local_users + +- name: Override local users with minimum configuration (overridden state - normal mode) + cisco.nd.nd_local_user: + <<: *update_all_local_users_overridden_state + register: nm_override_local_users + +- name: Asserts for local users overridden state update tasks + ansible.builtin.assert: + that: + - cm_override_local_users is changed + - cm_override_local_users.after | length == 3 + - cm_override_local_users.after.0.email == "overrideansibleuser@example.com" + - 
cm_override_local_users.after.0.first_name == "Overridden Ansible first name" + - cm_override_local_users.after.0.last_name == "Overridden Ansible last name" + - cm_override_local_users.after.0.login_id == "ansible_local_user" + - cm_override_local_users.after.0.remote_id_claim == "ansible_remote_user" + - cm_override_local_users.after.0.remote_user_authorization == true + - cm_override_local_users.after.0.reuse_limitation == 15 + - cm_override_local_users.after.0.security_domains | length == 1 + - cm_override_local_users.after.0.security_domains.0.name == "all" + - cm_override_local_users.after.0.security_domains.0.roles | length == 1 + - cm_override_local_users.after.0.security_domains.0.roles.0 == "observer" + - cm_override_local_users.after.0.time_interval_limitation == 5 + - cm_override_local_users.after.1.login_id == "admin" + - cm_override_local_users.after.1.first_name == "admin" + - cm_override_local_users.after.1.remote_user_authorization == false + - cm_override_local_users.after.1.reuse_limitation == 0 + - cm_override_local_users.after.1.security_domains | length == 1 + - cm_override_local_users.after.1.security_domains.0.name == "all" + - cm_override_local_users.after.1.security_domains.0.roles | length == 1 + - cm_override_local_users.after.1.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.after.1.time_interval_limitation == 0 + - cm_override_local_users.after.2.login_id == "ansible_local_user_3" + - cm_override_local_users.after.2.security_domains.0.name == "all" + - cm_override_local_users.before | length == 3 + - cm_override_local_users.before.2.first_name == "admin" + - cm_override_local_users.before.2.remote_user_authorization == false + - cm_override_local_users.before.2.reuse_limitation == 0 + - cm_override_local_users.before.2.security_domains | length == 1 + - cm_override_local_users.before.2.security_domains.0.name == "all" + - cm_override_local_users.before.2.security_domains.0.roles | length == 1 + - 
cm_override_local_users.before.2.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.before.2.time_interval_limitation == 0 + - cm_override_local_users.before.1.email == "updatedansibleuser@example.com" + - cm_override_local_users.before.1.first_name == "Updated Ansible first name" + - cm_override_local_users.before.1.last_name == "Updated Ansible last name" + - cm_override_local_users.before.1.login_id == "ansible_local_user" + - cm_override_local_users.before.1.remote_user_authorization == false + - cm_override_local_users.before.1.reuse_limitation == 25 + - cm_override_local_users.before.1.security_domains | length == 1 + - cm_override_local_users.before.1.security_domains.0.name == "all" + - cm_override_local_users.before.1.security_domains.0.roles | length == 1 + - cm_override_local_users.before.1.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.before.1.time_interval_limitation == 15 + - cm_override_local_users.before.0.email == "secondansibleuser@example.com" + - cm_override_local_users.before.0.first_name == "Second Ansible first name" + - cm_override_local_users.before.0.last_name == "Second Ansible last name" + - cm_override_local_users.before.0.login_id == "ansible_local_user_2" + - cm_override_local_users.before.0.remote_id_claim == "ansible_remote_user_2" + - cm_override_local_users.before.0.remote_user_authorization == true + - cm_override_local_users.before.0.reuse_limitation == 20 + - cm_override_local_users.before.0.security_domains | length == 1 + - cm_override_local_users.before.0.security_domains.0.name == "all" + - cm_override_local_users.before.0.security_domains.0.roles | length == 1 + - cm_override_local_users.before.0.security_domains.0.roles.0 == "fabric_admin" + - cm_override_local_users.before.0.time_interval_limitation == 10 + - cm_override_local_users.proposed.0.email == "overrideansibleuser@example.com" + - cm_override_local_users.proposed.0.first_name == "Overridden Ansible first name" + - 
cm_override_local_users.proposed.0.last_name == "Overridden Ansible last name" + - cm_override_local_users.proposed.0.login_id == "ansible_local_user" + - cm_override_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - cm_override_local_users.proposed.0.remote_user_authorization == true + - cm_override_local_users.proposed.0.reuse_limitation == 15 + - cm_override_local_users.proposed.0.security_domains | length == 1 + - cm_override_local_users.proposed.0.security_domains.0.name == "all" + - cm_override_local_users.proposed.0.security_domains.0.roles | length == 1 + - cm_override_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - cm_override_local_users.proposed.0.time_interval_limitation == 5 + - cm_override_local_users.proposed.1.login_id == "admin" + - cm_override_local_users.proposed.1.first_name == "admin" + - cm_override_local_users.proposed.1.remote_user_authorization == false + - cm_override_local_users.proposed.1.reuse_limitation == 0 + - cm_override_local_users.proposed.1.security_domains | length == 1 + - cm_override_local_users.proposed.1.security_domains.0.name == "all" + - cm_override_local_users.proposed.1.security_domains.0.roles | length == 1 + - cm_override_local_users.proposed.1.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.proposed.1.time_interval_limitation == 0 + - cm_override_local_users.proposed.2.login_id == "ansible_local_user_3" + - cm_override_local_users.proposed.2.security_domains.0.name == "all" + - cm_override_local_users is changed + - cm_override_local_users.after | length == 3 + - cm_override_local_users.after.0.email == "overrideansibleuser@example.com" + - cm_override_local_users.after.0.first_name == "Overridden Ansible first name" + - cm_override_local_users.after.0.last_name == "Overridden Ansible last name" + - cm_override_local_users.after.0.login_id == "ansible_local_user" + - cm_override_local_users.after.0.remote_id_claim == "ansible_remote_user" + - 
cm_override_local_users.after.0.remote_user_authorization == true + - cm_override_local_users.after.0.reuse_limitation == 15 + - cm_override_local_users.after.0.security_domains | length == 1 + - cm_override_local_users.after.0.security_domains.0.name == "all" + - cm_override_local_users.after.0.security_domains.0.roles | length == 1 + - cm_override_local_users.after.0.security_domains.0.roles.0 == "observer" + - cm_override_local_users.after.0.time_interval_limitation == 5 + - cm_override_local_users.after.1.login_id == "admin" + - cm_override_local_users.after.1.first_name == "admin" + - cm_override_local_users.after.1.remote_user_authorization == false + - cm_override_local_users.after.1.reuse_limitation == 0 + - cm_override_local_users.after.1.security_domains | length == 1 + - cm_override_local_users.after.1.security_domains.0.name == "all" + - cm_override_local_users.after.1.security_domains.0.roles | length == 1 + - cm_override_local_users.after.1.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.after.1.time_interval_limitation == 0 + - cm_override_local_users.after.2.login_id == "ansible_local_user_3" + - cm_override_local_users.after.2.security_domains.0.name == "all" + - cm_override_local_users.before | length == 3 + - cm_override_local_users.before.2.first_name == "admin" + - cm_override_local_users.before.2.remote_user_authorization == false + - cm_override_local_users.before.2.reuse_limitation == 0 + - cm_override_local_users.before.2.security_domains | length == 1 + - cm_override_local_users.before.2.security_domains.0.name == "all" + - cm_override_local_users.before.2.security_domains.0.roles | length == 1 + - cm_override_local_users.before.2.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.before.2.time_interval_limitation == 0 + - cm_override_local_users.before.1.email == "updatedansibleuser@example.com" + - cm_override_local_users.before.1.first_name == "Updated Ansible first name" + - 
cm_override_local_users.before.1.last_name == "Updated Ansible last name" + - cm_override_local_users.before.1.login_id == "ansible_local_user" + - cm_override_local_users.before.1.remote_user_authorization == false + - cm_override_local_users.before.1.reuse_limitation == 25 + - cm_override_local_users.before.1.security_domains | length == 1 + - cm_override_local_users.before.1.security_domains.0.name == "all" + - cm_override_local_users.before.1.security_domains.0.roles | length == 1 + - cm_override_local_users.before.1.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.before.1.time_interval_limitation == 15 + - cm_override_local_users.before.0.email == "secondansibleuser@example.com" + - cm_override_local_users.before.0.first_name == "Second Ansible first name" + - cm_override_local_users.before.0.last_name == "Second Ansible last name" + - cm_override_local_users.before.0.login_id == "ansible_local_user_2" + - cm_override_local_users.before.0.remote_id_claim == "ansible_remote_user_2" + - cm_override_local_users.before.0.remote_user_authorization == true + - cm_override_local_users.before.0.reuse_limitation == 20 + - cm_override_local_users.before.0.security_domains | length == 1 + - cm_override_local_users.before.0.security_domains.0.name == "all" + - cm_override_local_users.before.0.security_domains.0.roles | length == 1 + - cm_override_local_users.before.0.security_domains.0.roles.0 == "fabric_admin" + - cm_override_local_users.before.0.time_interval_limitation == 10 + - cm_override_local_users.proposed.0.email == "overrideansibleuser@example.com" + - cm_override_local_users.proposed.0.first_name == "Overridden Ansible first name" + - cm_override_local_users.proposed.0.last_name == "Overridden Ansible last name" + - cm_override_local_users.proposed.0.login_id == "ansible_local_user" + - cm_override_local_users.proposed.0.remote_id_claim == "ansible_remote_user" + - cm_override_local_users.proposed.0.remote_user_authorization == true + - 
cm_override_local_users.proposed.0.reuse_limitation == 15 + - cm_override_local_users.proposed.0.security_domains | length == 1 + - cm_override_local_users.proposed.0.security_domains.0.name == "all" + - cm_override_local_users.proposed.0.security_domains.0.roles | length == 1 + - cm_override_local_users.proposed.0.security_domains.0.roles.0 == "observer" + - cm_override_local_users.proposed.0.time_interval_limitation == 5 + - cm_override_local_users.proposed.1.login_id == "admin" + - cm_override_local_users.proposed.1.first_name == "admin" + - cm_override_local_users.proposed.1.remote_user_authorization == false + - cm_override_local_users.proposed.1.reuse_limitation == 0 + - cm_override_local_users.proposed.1.security_domains | length == 1 + - cm_override_local_users.proposed.1.security_domains.0.name == "all" + - cm_override_local_users.proposed.1.security_domains.0.roles | length == 1 + - cm_override_local_users.proposed.1.security_domains.0.roles.0 == "super_admin" + - cm_override_local_users.proposed.1.time_interval_limitation == 0 + - cm_override_local_users.proposed.2.login_id == "ansible_local_user_3" + - cm_override_local_users.proposed.2.security_domains.0.name == "all" + +# --- DELETED STATE TESTS --- + - name: Delete local user (check mode) cisco.nd.nd_local_user: &delete_local_user <<: *nd_info @@ -594,7 +1065,6 @@ - cm_delete_local_user.before.2.security_domains.0.roles | length == 1 - cm_delete_local_user.before.2.security_domains.0.roles.0 == "super_admin" - cm_delete_local_user.before.2.time_interval_limitation == 0 - - cm_delete_local_user.diff == [] - cm_delete_local_user.proposed.0.login_id == "ansible_local_user" - nm_delete_local_user is changed - nm_delete_local_user.after | length == 2 @@ -632,20 +1102,14 @@ - nm_delete_local_user.before.2.security_domains.0.roles | length == 1 - nm_delete_local_user.before.2.security_domains.0.roles.0 == "super_admin" - nm_delete_local_user.before.2.time_interval_limitation == 0 - - 
nm_delete_local_user.diff == [] - nm_delete_local_user.proposed.0.login_id == "ansible_local_user" - nm_delete_local_user_again is not changed - nm_delete_local_user_again.after == nm_delete_local_user.after - nm_delete_local_user_again.before == nm_delete_local_user.after - - nm_delete_local_user_again.diff == [] - nm_delete_local_user_again.proposed == nm_delete_local_user.proposed -# CLEAN UP +# --- CLEAN UP --- + - name: Ensure local users do not exist cisco.nd.nd_local_user: - <<: *nd_info - config: - - login_id: ansible_local_user - - login_id: ansible_local_user_2 - - login_id: ansible_local_user_3 - state: deleted + <<: *clean_all_local_users From a859d133f8c73fd5ff0239888aa61674ae10db8f Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 19 Mar 2026 13:03:29 -0400 Subject: [PATCH 063/109] [ignore] Revert local users endpoints filename to aaa_local_users.py. --- .../v1/infra/{infra_aaa_local_users.py => aaa_local_users.py} | 0 plugins/module_utils/orchestrators/local_user.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename plugins/module_utils/endpoints/v1/infra/{infra_aaa_local_users.py => aaa_local_users.py} (100%) diff --git a/plugins/module_utils/endpoints/v1/infra/infra_aaa_local_users.py b/plugins/module_utils/endpoints/v1/infra/aaa_local_users.py similarity index 100% rename from plugins/module_utils/endpoints/v1/infra/infra_aaa_local_users.py rename to plugins/module_utils/endpoints/v1/infra/aaa_local_users.py diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index 0c2a6bf8..b567efa5 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -10,7 +10,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.local_user.local_user import LocalUserModel from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel from 
ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.infra_aaa_local_users import ( +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.infra.aaa_local_users import ( EpInfraAaaLocalUsersPost, EpInfraAaaLocalUsersPut, EpInfraAaaLocalUsersDelete, From 96a492d7db9267799a467b4d985a26de9f5cea6d Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 19 Mar 2026 13:30:59 -0400 Subject: [PATCH 064/109] [ignore] Change in NDStateMachine initialization to take advantage of from_ansible_config static method from NDConfigCollection. --- plugins/module_utils/nd_state_machine.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 37324020..56adc9a9 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -44,17 +44,10 @@ def __init__(self, module: AnsibleModule, model_orchestrator: Type[NDBaseOrchest # Ongoing collection of configuration objects that were changed self.sent = NDConfigCollection(model_class=self.model_class) # Collection of configuration objects given by user - self.proposed = NDConfigCollection(model_class=self.model_class) - - for config in self.module.params.get("config", []): - # Parse config into model - item = self.model_class.from_config(config) - self.proposed.add(item) + self.proposed = NDConfigCollection.from_ansible_config(data=self.module.params.get("config", []), model_class=self.model_class) self.output.assign(after=self.existing, before=self.before, proposed=self.proposed) - except ValidationError as e: - raise NDStateMachineError(f"Invalid configuration. 
for config {config}: {str(e)}") from e except Exception as e: raise NDStateMachineError(f"Initialization failed: {str(e)}") from e From 4c593dfe325137a6de26aa6a63143407eaae4077 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Thu, 19 Mar 2026 13:32:38 -0400 Subject: [PATCH 065/109] [ignore] Remove ValidationError import from nd_state_machine.py. --- plugins/module_utils/nd_state_machine.py | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index 56adc9a9..fb812c33 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -5,7 +5,6 @@ from __future__ import absolute_import, division, print_function from typing import Type -from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ValidationError from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput From f3a0eff5f0d1bd3e044fa665d422a073e969600e Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 24 Mar 2026 12:21:33 -0400 Subject: [PATCH 066/109] [ignore] Add function to nd_local_user module. Slighty fix Documentation and Example sections in nd_local_user module. Remove Dict class inheritance from NDConstantMapping. 
--- plugins/module_utils/constants.py | 2 +- plugins/modules/nd_local_user.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index adbe345e..f5bfd977 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -10,7 +10,7 @@ from copy import deepcopy -class NDConstantMapping(Dict): +class NDConstantMapping: def __init__(self, data: Dict): self.data = data self.new_dict = deepcopy(data) diff --git a/plugins/modules/nd_local_user.py b/plugins/modules/nd_local_user.py index 53680e99..4f1ff197 100644 --- a/plugins/modules/nd_local_user.py +++ b/plugins/modules/nd_local_user.py @@ -109,6 +109,7 @@ notes: - This module is only supported on Nexus Dashboard having version 4.2.1 or higher. - This module is not idempotent when creating or updating a local user object when O(config.user_password) is used. +- When using O(state=overridden), admin user configuration must be specified as it cannot be deleted. 
""" EXAMPLES = r""" @@ -137,13 +138,14 @@ config: - login_id: local_user_min user_password: localUserMinuser_password - security_domain: all + security_domains: + - name: all state: merged - name: Update local user cisco.nd.nd_local_user: config: - - email: udpateduser@example.com + - email: updateduser@example.com login_id: local_user first_name: Updated user first name last_name: Updated user last name @@ -155,7 +157,6 @@ roles: super_admin - name: ansible_domain roles: observer - roles: super_admin remote_id_claim: "" remote_user_authorization: false state: replaced @@ -173,6 +174,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import NDStateMachine +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import require_pydantic from ansible_collections.cisco.nd.plugins.module_utils.models.local_user.local_user import LocalUserModel from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.local_user import LocalUserOrchestrator @@ -185,6 +187,7 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) + require_pydantic(module) try: # Initialize StateMachine From a8073b618511b2735225e4c7de21d5f1d9c4ac44 Mon Sep 17 00:00:00 2001 From: Gaspard Micol Date: Tue, 24 Mar 2026 12:33:17 -0400 Subject: [PATCH 067/109] [ignore] Make NDBaseOrchestrator a Generic class. 
--- plugins/module_utils/orchestrators/base.py | 18 ++++++++++-------- .../module_utils/orchestrators/local_user.py | 6 +++--- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/plugins/module_utils/orchestrators/base.py b/plugins/module_utils/orchestrators/base.py index fe16a524..be790125 100644 --- a/plugins/module_utils/orchestrators/base.py +++ b/plugins/module_utils/orchestrators/base.py @@ -5,14 +5,16 @@ from __future__ import absolute_import, division, print_function from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import BaseModel, ConfigDict -from typing import ClassVar, Type, Optional +from typing import ClassVar, Type, Optional, Generic, TypeVar from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType +ModelType = TypeVar("ModelType", bound=NDBaseModel) -class NDBaseOrchestrator(BaseModel): + +class NDBaseOrchestrator(BaseModel, Generic[ModelType]): model_config = ConfigDict( use_enum_values=True, validate_assignment=True, @@ -20,7 +22,7 @@ class NDBaseOrchestrator(BaseModel): arbitrary_types_allowed=True, ) - model_class: ClassVar[Type[NDBaseModel]] = Type[NDBaseModel] + model_class: ClassVar[Type[NDBaseModel]] = NDBaseModel # NOTE: if not defined by subclasses, return an error as they are required create_endpoint: Type[NDEndpointBaseModel] @@ -33,14 +35,14 @@ class NDBaseOrchestrator(BaseModel): sender: NDModule # NOTE: Generic CRUD API operations for simple endpoints with single identifier (e.g. 
"api/v1/infra/aaa/LocalUsers/{loginID}") - def create(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: + def create(self, model_instance: ModelType, **kwargs) -> ResponseType: try: api_endpoint = self.create_endpoint() return self.sender.request(path=api_endpoint.path, method=api_endpoint.verb, data=model_instance.to_payload()) except Exception as e: raise Exception(f"Create failed for {model_instance.get_identifier_value()}: {e}") from e - def update(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: + def update(self, model_instance: ModelType, **kwargs) -> ResponseType: try: api_endpoint = self.update_endpoint() api_endpoint.set_identifiers(model_instance.get_identifier_value()) @@ -48,7 +50,7 @@ def update(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: except Exception as e: raise Exception(f"Update failed for {model_instance.get_identifier_value()}: {e}") from e - def delete(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: + def delete(self, model_instance: ModelType, **kwargs) -> ResponseType: try: api_endpoint = self.delete_endpoint() api_endpoint.set_identifiers(model_instance.get_identifier_value()) @@ -56,7 +58,7 @@ def delete(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: except Exception as e: raise Exception(f"Delete failed for {model_instance.get_identifier_value()}: {e}") from e - def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: + def query_one(self, model_instance: ModelType, **kwargs) -> ResponseType: try: api_endpoint = self.query_one_endpoint() api_endpoint.set_identifiers(model_instance.get_identifier_value()) @@ -64,7 +66,7 @@ def query_one(self, model_instance: NDBaseModel, **kwargs) -> ResponseType: except Exception as e: raise Exception(f"Query failed for {model_instance.get_identifier_value()}: {e}") from e - def query_all(self, model_instance: Optional[NDBaseModel] = None, **kwargs) -> ResponseType: + def query_all(self, model_instance: 
Optional[ModelType] = None, **kwargs) -> ResponseType: try: api_endpoint = self.query_all_endpoint() result = self.sender.query_obj(api_endpoint.path) diff --git a/plugins/module_utils/orchestrators/local_user.py b/plugins/module_utils/orchestrators/local_user.py index b567efa5..e95a3003 100644 --- a/plugins/module_utils/orchestrators/local_user.py +++ b/plugins/module_utils/orchestrators/local_user.py @@ -4,7 +4,7 @@ from __future__ import absolute_import, division, print_function -from typing import Type +from typing import Type, ClassVar from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.local_user.local_user import LocalUserModel @@ -18,8 +18,8 @@ ) -class LocalUserOrchestrator(NDBaseOrchestrator): - model_class: Type[NDBaseModel] = LocalUserModel +class LocalUserOrchestrator(NDBaseOrchestrator[LocalUserModel]): + model_class: ClassVar[Type[NDBaseModel]] = LocalUserModel create_endpoint: Type[NDEndpointBaseModel] = EpInfraAaaLocalUsersPost update_endpoint: Type[NDEndpointBaseModel] = EpInfraAaaLocalUsersPut From b4ecddc223fa18cdbb014a068368bc996c084e01 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Wed, 25 Mar 2026 16:28:31 +0530 Subject: [PATCH 068/109] Rename inventory validate to switches validate --- ...d_inventory_validate.py => nd_switches_validate.py} | 10 +++++----- .../targets/nd_manage_switches/tests/nd/deleted.yaml | 6 +++--- .../targets/nd_manage_switches/tests/nd/merged.yaml | 8 ++++---- .../nd_manage_switches/tests/nd/overridden.yaml | 6 +++--- .../targets/nd_manage_switches/tests/nd/poap.yaml | 6 +++--- .../targets/nd_manage_switches/tests/nd/rma.yaml | 4 ++-- .../targets/nd_manage_switches/tests/nd/sanity.yaml | 8 ++++---- 7 files changed, 24 insertions(+), 24 deletions(-) rename plugins/action/{nd_inventory_validate.py => nd_switches_validate.py} (97%) 
diff --git a/plugins/action/nd_inventory_validate.py b/plugins/action/nd_switches_validate.py similarity index 97% rename from plugins/action/nd_inventory_validate.py rename to plugins/action/nd_switches_validate.py index 024ba634..ed0c4b47 100644 --- a/plugins/action/nd_inventory_validate.py +++ b/plugins/action/nd_switches_validate.py @@ -4,9 +4,9 @@ # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -"""ND Inventory Validation Action Plugin. +"""ND Switches Validation Action Plugin. -Validates switch inventory data returned from nd_rest against expected +Validates switch data returned from nd_rest against expected configuration entries. Checks that every entry in test_data has a matching switch in the ND API response (fabricManagementIp == seed_ip, switchRole == role). @@ -47,7 +47,7 @@ # Validation orchestration model # --------------------------------------------------------------------------- -class InventoryValidate(BaseModel): +class SwitchesValidate(BaseModel): """Orchestrates the match between playbook config entries and live ND inventory.""" config_data: Optional[List[Any]] = None @@ -202,7 +202,7 @@ def run(self, tmp=None, task_vars=None): if not HAS_PYDANTIC or not HAS_MODELS: results["failed"] = True - results["msg"] = "pydantic and the ND collection models are required for nd_inventory_validate" + results["msg"] = "pydantic and the ND collection models are required for nd_switches_validate" return results nd_data = self._task.args["nd_data"] @@ -248,7 +248,7 @@ def run(self, tmp=None, task_vars=None): # Role mode: only match by role, ignore seed_ip ignore_fields["seed_ip"] = 1 - validation = InventoryValidate( + validation = SwitchesValidate( config_data=test_data, nd_data=switches, ignore_fields=ignore_fields, diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml index 97202466..04a2e4f2 100644 --- 
a/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml @@ -30,7 +30,7 @@ tags: deleted - name: Deleted TC1 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_base_conf }}" changed: "{{merged_result.changed}}" @@ -66,7 +66,7 @@ tags: deleted - name: Deleted TC2 - Validate nd Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_delete_conf }}" changed: "{{ delete_result.changed }}" @@ -123,7 +123,7 @@ tags: deleted - name: Deleted TC4 - Validate nd Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_delete_conf }}" changed: "{{ delete_result.changed }}" diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml index 2b8dc056..4520833b 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml @@ -25,7 +25,7 @@ tags: deleted - name: Merged TC1 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_base_conf }}" changed: "{{ merged_result.changed }}" @@ -67,7 +67,7 @@ tags: deleted - name: Merged TC3 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_delete_conf }}" changed: "{{ delete_result.changed }}" @@ -108,7 +108,7 @@ tags: deleted - name: Merged TC4 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_merge_conf }}" changed: "{{ merged_result.changed }}" @@ -174,7 +174,7 @@ tags: deleted - 
name: Merged TC6 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_mergev_conf }}" changed: "{{ merged_result.changed }}" diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml index 75390bec..f952e8bc 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml @@ -25,7 +25,7 @@ tags: overridden - name: Overridden TC1 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_base_conf }}" changed: " {{ merged_result.changed }}" @@ -77,7 +77,7 @@ tags: overridden - name: Overridden TC3 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_overridden_conf }}" changed: "{{ overridden_result.changed }}" @@ -117,7 +117,7 @@ tags: overridden - name: Overridden TC4 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_overridden_conf }}" changed: "{{ overridden_result.changed }}" diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml index c098c7ca..d21d965b 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml @@ -103,7 +103,7 @@ tags: poap - name: POAP TC1 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_delete_conf }}" register: result @@ -159,7 +159,7 @@ tags: poap - name: Poap TC1 - Merged - Validate ND Data - cisco.nd.nd_inventory_validate: + 
cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_poap_conf }}" changed: "{{ merged_result.changed }}" @@ -229,7 +229,7 @@ tags: poap - name: Poap TC3 - Merged - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_poap_conf }}" changed: "{{ merged_result.changed }}" diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml index d214ac33..c78ce2f6 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml @@ -70,7 +70,7 @@ tags: rma - name: RMA TC1 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_delete_conf }}" register: result @@ -117,7 +117,7 @@ tags: rma - name: RMA TC2 - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_rma_conf }}" when: rma_enabled == True diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml index f66b59ed..46d1f2a7 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml @@ -30,7 +30,7 @@ tags: sanity - name: Sanity TC1 - Merged - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_base_conf }}" changed: "{{ create_result.changed }}" @@ -72,7 +72,7 @@ # tags: sanity # - name: Sanity TC3 - Query - Validate ND Data -# cisco.nd.nd_inventory_validate: +# cisco.nd.nd_switches_validate: # nd_data: "{{ query_result }}" # test_data: "{{ nd_switches_sanity_conf }}" # changed: "{{ create_result.changed }}" 
@@ -116,7 +116,7 @@ tags: sanity - name: Sanity TC4 - Overridden - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_sanity_conf }}" changed: "{{ create_result.changed }}" @@ -163,7 +163,7 @@ tags: sanity - name: Sanity TC6 - Deleted - Validate ND Data - cisco.nd.nd_inventory_validate: + cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_sanity_conf }}" changed: "{{ deleted_result.changed }}" From 76426ac431e5a81d26fb8e6a2a0a06386e7f72a4 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 26 Mar 2026 12:19:47 +0530 Subject: [PATCH 069/109] Property changes for POAP, RMA. --- .../models/manage_switches/config_models.py | 44 ++++++------- plugins/module_utils/nd_switch_resources.py | 26 ++++---- .../utils/manage_switches/switch_helpers.py | 2 +- plugins/modules/nd_manage_switches.py | 25 ++++---- .../templates/nd_manage_switches_conf.j2 | 2 +- .../nd_manage_switches/tests/nd/poap.yaml | 2 +- .../nd_manage_switches/tests/nd/rma.yaml | 6 +- .../nd_manage_switches/tests/nd/sanity.yaml | 61 +++++++++++-------- 8 files changed, 90 insertions(+), 78 deletions(-) diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 711bbb57..a9cada16 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -255,17 +255,17 @@ class RMAConfigModel(NDNestedModel): ) # Required fields for RMA - serial_number: str = Field( + new_serial_number: str = Field( ..., - alias="serialNumber", + alias="newSerialNumber", min_length=1, - description="Serial number of switch to Bootstrap for RMA" + description="Serial number of the new/replacement switch to Bootstrap for RMA" ) - old_serial: str = Field( + old_serial_number: str = Field( ..., - alias="oldSerial", + alias="oldSerialNumber", min_length=1, - 
description="Serial number of switch to be replaced by RMA" + description="Serial number of the existing switch to be replaced by RMA" ) model: Optional[str] = Field( default=None, @@ -296,7 +296,7 @@ class RMAConfigModel(NDNestedModel): ), ) - @field_validator('serial_number', 'old_serial', mode='before') + @field_validator('new_serial_number', 'old_serial_number', mode='before') @classmethod def validate_serial_numbers(cls, v: str) -> str: """Validate serial numbers are not empty.""" @@ -337,7 +337,7 @@ class SwitchConfigModel(NDBaseModel): # Fields excluded from diff — only seed_ip + role are compared exclude_from_diff: ClassVar[List[str]] = [ - "user_name", "password", "auth_proto", + "username", "password", "auth_proto", "preserve_config", "platform_type", "poap", "rma", "operation_type", ] @@ -351,7 +351,7 @@ class SwitchConfigModel(NDBaseModel): ) # Optional fields — required for merged/overridden, optional for query/deleted - user_name: Optional[str] = Field( + username: Optional[str] = Field( default=None, alias="userName", description="Login username to the switch (required for merged/overridden states)" @@ -413,11 +413,11 @@ def to_config_dict(self) -> Dict[str, Any]: """Return the playbook config as a dict with all credentials stripped. Returns: - Dict of config fields with ``user_name``, ``password``, + Dict of config fields with ``username``, ``password``, ``discovery_username``, and ``discovery_password`` excluded. 
""" return self.to_config(exclude={ - "user_name": True, + "username": True, "password": True, "poap": {"__all__": {"discovery_username": True, "discovery_password": True}}, "rma": {"__all__": {"discovery_username": True, "discovery_password": True}}, @@ -455,13 +455,13 @@ def validate_poap_rma_credentials(self) -> Self: """Validate credentials for POAP and RMA operations.""" if self.poap or self.rma: # POAP/RMA require credentials - if not self.user_name or not self.password: + if not self.username or not self.password: raise ValueError( - "For POAP and RMA operations, user_name and password are required" + "For POAP and RMA operations, username and password are required" ) # For POAP and RMA, username should be 'admin' - if self.user_name != "admin": - raise ValueError("For POAP and RMA operations, user_name should be 'admin'") + if self.username != "admin": + raise ValueError("For POAP and RMA operations, username should be 'admin'") return self @@ -472,7 +472,7 @@ def apply_state_defaults(self, info: ValidationInfo) -> Self: When ``context={"state": "merged"}`` (or ``"overridden"``) is passed to ``model_validate()``, the model: - Defaults ``role`` to ``SwitchRole.LEAF`` when not specified. - - Enforces that ``user_name`` and ``password`` are provided. + - Enforces that ``username`` and ``password`` are provided. For ``query`` / ``deleted`` (or no context), fields remain as-is. """ @@ -495,9 +495,9 @@ def apply_state_defaults(self, info: ValidationInfo) -> Self: if state in ("merged", "overridden"): if self.role is None: self.role = SwitchRole.LEAF - if not self.user_name or not self.password: + if not self.username or not self.password: raise ValueError( - f"user_name and password are required " + f"username and password are required " f"for '{state}' state " f"(switch: {self.seed_ip})" ) @@ -579,7 +579,7 @@ def from_switch_data(cls, sw: Any) -> "SwitchConfigModel": """Build a config-shaped entry from a live inventory record. 
Only the fields recoverable from the ND inventory API are populated. - Credentials (user_name, password) are intentionally omitted. + Credentials (username, password) are intentionally omitted. Args: sw: A SwitchDataModel instance from the fabric inventory. @@ -616,13 +616,13 @@ def to_gathered_dict(self) -> Dict[str, Any]: """Return a config dict suitable for gathered output. platform_type is excluded (internal detail not needed by the user). - user_name and password are replaced with placeholders so the returned + username and password are replaced with placeholders so the returned data is immediately usable as ``config:`` input after substituting real credentials. Returns: Dict with seed_ip, role, auth_proto, preserve_config, - user_name set to ``""``, password set to ``""``. + username set to ``""``, password set to ``""``. """ result = self.to_config(exclude={ "platform_type": True, @@ -630,7 +630,7 @@ def to_gathered_dict(self) -> Dict[str, Any]: "rma": True, "operation_type": True, }) - result["user_name"] = "" + result["username"] = "" result["password"] = "" return result diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index c5f4147b..c41c59ee 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -925,10 +925,10 @@ def bulk_save_credentials( cred_groups: Dict[Tuple[str, str], List[str]] = {} for sn, cfg in switch_actions: - if not cfg.user_name or not cfg.password: - log.debug(f"Skipping credentials for {sn}: missing user_name or password") + if not cfg.username or not cfg.password: + log.debug(f"Skipping credentials for {sn}: missing username or password") continue - key = (cfg.user_name, cfg.password) + key = (cfg.username, cfg.password) cred_groups.setdefault(key, []).append(sn) if not cred_groups: @@ -1960,7 +1960,7 @@ def handle( switch_actions: List[Tuple[str, SwitchConfigModel]] = [] rma_diff_data: List[Tuple[str, str, SwitchConfigModel]] = 
[] # (new_serial, old_serial, switch_cfg) for switch_cfg, rma_cfg in rma_entries: - new_serial = rma_cfg.serial_number + new_serial = rma_cfg.new_serial_number bootstrap_data = bootstrap_idx.get(new_serial) if not bootstrap_data: @@ -1975,7 +1975,7 @@ def handle( SwitchDiffEngine.validate_switch_api_fields( nd=nd, - serial=rma_cfg.serial_number, + serial=rma_cfg.new_serial_number, model=rma_cfg.model, version=rma_cfg.version, config_data=rma_cfg.config_data, @@ -1986,16 +1986,16 @@ def handle( rma_model = self._build_rma_model( switch_cfg, rma_cfg, bootstrap_data, - old_switch_info[rma_cfg.old_serial], + old_switch_info[rma_cfg.old_serial_number], ) log.info( - f"Built RMA model: replacing {rma_cfg.old_serial} with " + f"Built RMA model: replacing {rma_cfg.old_serial_number} with " f"{rma_model.new_switch_id}" ) - self._provision_rma_switch(rma_cfg.old_serial, rma_model) + self._provision_rma_switch(rma_cfg.old_serial_number, rma_model) switch_actions.append((rma_model.new_switch_id, switch_cfg)) - rma_diff_data.append((rma_model.new_switch_id, rma_cfg.old_serial, switch_cfg)) + rma_diff_data.append((rma_model.new_switch_id, rma_cfg.old_serial_number, switch_cfg)) # Post-processing: wait for RMA switches to become ready, then # save credentials and finalize. 
RMA switches come up via POAP @@ -2058,7 +2058,7 @@ def _validate_prerequisites( result: Dict[str, Dict[str, Any]] = {} for switch_cfg, rma_cfg in rma_entries: - old_serial = rma_cfg.old_serial + old_serial = rma_cfg.old_serial_number old_switch = existing_by_serial.get(old_serial) if old_switch is None: @@ -2147,12 +2147,12 @@ def _build_rma_model( """ log = self.ctx.log log.debug( - f"ENTER: _build_rma_model(new={rma_cfg.serial_number}, " - f"old={rma_cfg.old_serial})" + f"ENTER: _build_rma_model(new={rma_cfg.new_serial_number}, " + f"old={rma_cfg.old_serial_number})" ) # User config fields - new_switch_id = rma_cfg.serial_number + new_switch_id = rma_cfg.new_serial_number hostname = old_switch_info.get("hostname", "") ip = switch_cfg.seed_ip image_policy = rma_cfg.image_policy diff --git a/plugins/module_utils/utils/manage_switches/switch_helpers.py b/plugins/module_utils/utils/manage_switches/switch_helpers.py index 55f71ba9..539309a7 100644 --- a/plugins/module_utils/utils/manage_switches/switch_helpers.py +++ b/plugins/module_utils/utils/manage_switches/switch_helpers.py @@ -96,7 +96,7 @@ def group_switches_by_credentials( for switch in switches: password_hash = hash(switch.password) group_key = ( - switch.user_name, + switch.username, password_hash, switch.auth_proto, switch.platform_type, diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index b6f01cb8..7037c024 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -69,7 +69,7 @@ type: str default: MD5 choices: ['MD5', 'SHA', 'MD5_DES', 'MD5_AES', 'SHA_DES', 'SHA_AES'] - user_name: + username: description: - Login username for the switch. - For POAP and RMA, should be C(admin). @@ -184,12 +184,12 @@ description: - Password for device discovery during POAP and RMA discovery. type: str - serial_number: + new_serial_number: description: - Serial number of switch to Bootstrap for RMA. 
type: str required: true - old_serial: + old_serial_number: description: - Serial number of switch to be replaced by RMA. type: str @@ -257,7 +257,7 @@ fabric: my-fabric config: - seed_ip: 192.168.10.201 - user_name: admin + username: admin password: "{{ switch_password }}" role: leaf preserve_config: false @@ -268,11 +268,11 @@ fabric: my-fabric config: - seed_ip: 192.168.10.201 - user_name: admin + username: admin password: "{{ switch_password }}" role: leaf - seed_ip: 192.168.10.202 - user_name: admin + username: admin password: "{{ switch_password }}" role: spine state: merged @@ -282,7 +282,7 @@ fabric: my-fabric config: - seed_ip: 192.168.10.1 - user_name: admin + username: admin password: "{{ switch_password }}" poap: - preprovision_serial: SAL1234ABCD @@ -297,7 +297,7 @@ fabric: my-fabric config: - seed_ip: 192.168.10.1 - user_name: admin + username: admin password: "{{ switch_password }}" poap: - serial_number: SAL5678EFGH @@ -312,7 +312,7 @@ fabric: my-fabric config: - seed_ip: 192.168.10.1 - user_name: admin + username: admin password: "{{ switch_password }}" poap: - serial_number: SAL5678EFGH @@ -324,11 +324,11 @@ fabric: my-fabric config: - seed_ip: 192.168.10.1 - user_name: admin + username: admin password: "{{ switch_password }}" rma: - - old_serial: SAL1234ABCD - serial_number: SAL9999ZZZZ + - old_serial_number: SAL1234ABCD + new_serial_number: SAL9999ZZZZ model: N9K-C93180YC-EX version: "10.3(1)" hostname: leaf-replaced @@ -419,6 +419,7 @@ def main(): # Initialize logging try: log_config = Log() + log_config.config = "/Users/achengam/Documents/Ansible_Dev/NDBranch/ansible_collections/cisco/nd/ansible_cisco_log_r.json" log_config.commit() # Create logger instance for this module log = logging.getLogger("nd.nd_manage_switches") diff --git a/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 index fd4978fa..94af1f1b 100644 --- 
a/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 +++ b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 @@ -14,7 +14,7 @@ {% if switch.seed_ip is defined %} {% set _ = switch_item.update({'seed_ip': switch.seed_ip | default('') }) %} {% endif %} -{% set _ = switch_item.update({'user_name': switch_username}) %} +{% set _ = switch_item.update({'username': switch_username}) %} {% set _ = switch_item.update({'password': switch_password}) %} {% if switch.role is defined %} {% set _ = switch_item.update({'role': switch.role | default('') }) %} diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml index d21d965b..62c3bd98 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml @@ -118,7 +118,7 @@ ansible.builtin.set_fact: switch_conf: - seed_ip: "{{ test_data.sw2 }}" - user_name: '{{ switch_username }}' + username: '{{ switch_username }}' password: '{{ switch_password }}' role: border poap: diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml index c78ce2f6..8113ef04 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml @@ -144,11 +144,11 @@ state: merged config: - seed_ip: '{{ test_data.sw1 }}' - user_name: '{{ switch_username }}' + username: '{{ switch_username }}' password: '{{ switch_password }}' rma: - - serial_number: '{{ test_data.sw1_rma_serial }}' - old_serial: '{{ test_data.sw1_serial }}' + - new_serial_number: '{{ test_data.sw1_rma_serial }}' + old_serial_number: '{{ test_data.sw1_serial }}' model: '{{ test_data.rma_model }}' version: '{{ test_data.rma_version }}' hostname: '{{ test_data.rma_hostname }}' diff --git 
a/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml index 46d1f2a7..67b4548d 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml @@ -51,33 +51,44 @@ tags: sanity # ---------------------------------------------- # -# Query # +# Gathered # # ---------------------------------------------- # -# # TC - 3 -# - name: Sanity TC3 - Query - Prepare Conf -# ansible.builtin.set_fact: -# nd_switches_sanity_conf: -# - seed_ip: "{{ test_data.sw1 }}" -# role: leaf -# delegate_to: localhost -# tags: sanity - -# - name: Sanity TC3 - Query - Query a Switch - Hostname and Role must match -# cisco.nd.nd_manage_switches: -# fabric: "{{ test_data.test_fabric }}" -# state: query -# config: "{{ nd_switches_sanity_conf }}" -# register: query_result -# tags: sanity - -# - name: Sanity TC3 - Query - Validate ND Data -# cisco.nd.nd_switches_validate: -# nd_data: "{{ query_result }}" -# test_data: "{{ nd_switches_sanity_conf }}" -# changed: "{{ create_result.changed }}" -# register: result -# tags: sanity +# TC - 3 +- name: Sanity TC3 - Gathered - Gather Switch State in Fabric + cisco.nd.nd_manage_switches: + state: gathered + fabric: "{{ test_data.test_fabric }}" + register: gathered_result + tags: sanity + +- name: Sanity TC3 - Gathered - Build Gathered Lookup + ansible.builtin.set_fact: + gathered_seeds: "{{ gathered_result.gathered | map(attribute='seed_ip') | list }}" + gathered_role_map: "{{ gathered_result.gathered | items2dict(key_name='seed_ip', value_name='role') }}" + delegate_to: localhost + tags: sanity + +- name: Sanity TC3 - Gathered - Validate Gathered Count + ansible.builtin.assert: + that: + - gathered_result.gathered | length == nd_switches_base_conf | length + fail_msg: >- + Gathered count {{ gathered_result.gathered | length }} does not match + expected {{ nd_switches_base_conf | length }} + 
tags: sanity + +- name: Sanity TC3 - Gathered - Validate Each Switch Present and Role Matches + ansible.builtin.assert: + that: + - item.seed_ip in gathered_seeds + - "'role' not in item or gathered_role_map[item.seed_ip] == item.role" + fail_msg: >- + Switch {{ item.seed_ip }} missing from gathered output or role mismatch + (expected={{ item.role | default('any') }}, + got={{ gathered_role_map[item.seed_ip] | default('not found') }}) + loop: "{{ nd_switches_base_conf }}" + tags: sanity # ---------------------------------------------- # # Overridden # From 4aa9b7ebdfa2f67ad69ff337e986fbcaf45d3426 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 26 Mar 2026 15:36:44 +0530 Subject: [PATCH 070/109] Splitting POAP into Preprovision/Poap and Lucene Params Fix --- .../module_utils/endpoints/query_params.py | 4 +- .../models/manage_switches/__init__.py | 2 + .../models/manage_switches/config_models.py | 304 ++++++++++-------- plugins/module_utils/nd_switch_resources.py | 167 ++++++---- plugins/modules/nd_manage_switches.py | 117 ++++--- .../templates/nd_manage_switches_conf.j2 | 54 ++-- .../nd_manage_switches/tests/nd/poap.yaml | 27 +- 7 files changed, 389 insertions(+), 286 deletions(-) diff --git a/plugins/module_utils/endpoints/query_params.py b/plugins/module_utils/endpoints/query_params.py index 0d2c112e..54ae39f6 100644 --- a/plugins/module_utils/endpoints/query_params.py +++ b/plugins/module_utils/endpoints/query_params.py @@ -215,7 +215,9 @@ def to_query_string(self, url_encode: bool = True) -> str: # Lucene filter expressions require ':' and ' ' to remain unencoded # so the server-side parser can recognise the field:value syntax. if url_encode: - safe_chars = ": " if field_name == "filter" else "" + # Keep ':' unencoded so Lucene field:value syntax is preserved. + # Spaces are encoded as %20 so the query string is valid in URLs. 
+ safe_chars = ":" if field_name == "filter" else "" encoded_value = quote(str(field_value), safe=safe_chars) else: encoded_value = str(field_value) diff --git a/plugins/module_utils/models/manage_switches/__init__.py b/plugins/module_utils/models/manage_switches/__init__.py index 38e667a8..83020728 100644 --- a/plugins/module_utils/models/manage_switches/__init__.py +++ b/plugins/module_utils/models/manage_switches/__init__.py @@ -86,6 +86,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( # noqa: F401 ConfigDataModel, POAPConfigModel, + PreprovisionConfigModel, RMAConfigModel, SwitchConfigModel, ) @@ -136,6 +137,7 @@ # Config models "ConfigDataModel", "POAPConfigModel", + "PreprovisionConfigModel", "RMAConfigModel", "SwitchConfigModel", ] diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index a9cada16..2f6873c1 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -84,15 +84,29 @@ def validate_gateway(cls, v: str) -> str: class POAPConfigModel(NDNestedModel): - """ - POAP configuration entry for a single switch in the playbook config list. + """Bootstrap POAP config for a single switch. - Supports Bootstrap (serial_number only), Pre-provision (preprovision_serial only), - and Swap (both serial fields) operation modes. + Used when ``poap`` is specified alone (bootstrap-only operation). + ``serial_number`` and ``hostname`` are mandatory; all other fields are optional. + Model, version, and config data are sourced from the bootstrap API at runtime. + If the bootstrap API reports a different hostname or role, the API value overrides + the user-provided value and a warning is logged. 
""" identifiers: ClassVar[List[str]] = [] - # Discovery credentials + # Mandatory + serial_number: str = Field( + ..., + alias="serialNumber", + min_length=1, + description="Serial number of the physical switch to Bootstrap" + ) + hostname: str = Field( + ..., + description="Hostname for the switch during bootstrap" + ) + + # Optional discovery_username: Optional[str] = Field( default=None, alias="discoveryUsername", @@ -103,117 +117,97 @@ class POAPConfigModel(NDNestedModel): alias="discoveryPassword", description="Password for device discovery during POAP" ) - - # Bootstrap operation - requires actual switch serial number - serial_number: Optional[str] = Field( + image_policy: Optional[str] = Field( default=None, - alias="serialNumber", - min_length=1, - description="Serial number of switch to Bootstrap" + alias="imagePolicy", + description="Name of the image policy to be applied on switch" ) - # Pre-provision operation - requires pre-provision serial number - preprovision_serial: Optional[str] = Field( - default=None, - alias="preprovisionSerial", + @model_validator(mode='after') + def validate_discovery_credentials_pair(self) -> Self: + """Validate that discovery_username and discovery_password are both set or both absent.""" + has_user = bool(self.discovery_username) + has_pass = bool(self.discovery_password) + if has_user and not has_pass: + raise ValueError( + "discovery_password must be set when discovery_username is specified" + ) + if has_pass and not has_user: + raise ValueError( + "discovery_username must be set when discovery_password is specified" + ) + return self + + @field_validator('serial_number', mode='before') + @classmethod + def validate_serial_number_field(cls, v: str) -> str: + """Validate serial_number is not empty.""" + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("serial_number cannot be empty") + return result + + +class PreprovisionConfigModel(NDNestedModel): + """Pre-provision config for 
a single switch. + + Used when ``preprovision`` is specified alone. + All five fields — ``serial_number``, ``model``, ``version``, ``hostname``, + and ``config_data`` — are mandatory because the controller has no physical + switch to pull these values from. + """ + identifiers: ClassVar[List[str]] = [] + + # Mandatory + serial_number: str = Field( + ..., + alias="serialNumber", min_length=1, description="Serial number of switch to Pre-provision" ) - - # Common fields for both operations - model: Optional[str] = Field( - default=None, - description="Model of switch to Bootstrap/Pre-provision" - ) - version: Optional[str] = Field( - default=None, - description="Software version of switch to Bootstrap/Pre-provision" + model: str = Field( + ..., + min_length=1, + description="Model of switch to Pre-provision" ) - hostname: Optional[str] = Field( - default=None, - description="Hostname of switch to Bootstrap/Pre-provision" + version: str = Field( + ..., + min_length=1, + description="Software version of switch to Pre-provision" ) - image_policy: Optional[str] = Field( - default=None, - alias="imagePolicy", - description="Name of the image policy to be applied on switch" + hostname: str = Field( + ..., + description="Hostname for the switch during pre-provision" ) - config_data: Optional[ConfigDataModel] = Field( - default=None, + config_data: ConfigDataModel = Field( + ..., alias="configData", description=( - "Basic config data of switch to Bootstrap/Pre-provision. " + "Basic config data of switch to Pre-provision. " "'models' (list of module models) and 'gateway' (IP with mask) are mandatory." ), ) - @model_validator(mode='after') - def validate_operation_type(self) -> Self: - """Validate serial_number / preprovision_serial combinations. 
- - Allowed combinations: - - serial_number only → Bootstrap - - preprovision_serial only → Pre-provision - - both serial_number AND preprovision_serial → Swap (change serial - number of an existing pre-provisioned switch) - - neither → error - """ - has_serial = bool(self.serial_number) - has_preprov = bool(self.preprovision_serial) - - if not has_serial and not has_preprov: - raise ValueError( - "Either 'serial_number' (for Bootstrap / Swap) or 'preprovision_serial' " - "(for Pre-provision / Swap) must be provided." - ) - - return self - - @model_validator(mode='after') - def validate_required_fields_for_non_swap(self) -> Self: - """Validate model/version/hostname/config_data for pre-provision operations. - - Pre-provision (preprovision_serial only): - model, version, hostname, config_data are all mandatory because the - controller has no physical switch to pull these values from. - - Bootstrap (serial_number only): - These fields are optional — they can be omitted and the module will - pull them from the bootstrap GET API response at runtime. If - provided, they are validated against the bootstrap data before import. - - Swap (both serials present): - No check needed — the swap API only requires the new serial number. - """ - has_serial = bool(self.serial_number) - has_preprov = bool(self.preprovision_serial) - - # Pre-provision only: all four descriptor fields are mandatory - if has_preprov and not has_serial: - missing = [] - if not self.model: - missing.append("model") - if not self.version: - missing.append("version") - if not self.hostname: - missing.append("hostname") - if not self.config_data: - missing.append("config_data") - if missing: - raise ValueError( - f"model, version, hostname and config_data are required for " - f"Pre-provisioning a switch. 
Missing: {', '.join(missing)}" - ) - return self + # Optional + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername", + description="Username for device discovery during pre-provision" + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password for device discovery during pre-provision" + ) + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy to apply during pre-provision" + ) @model_validator(mode='after') def validate_discovery_credentials_pair(self) -> Self: - """Validate that discovery_username and discovery_password are both set or both absent. - - Mirrors the dcnm_inventory.py bidirectional check: - - discovery_username set → discovery_password required - - discovery_password set → discovery_username required - """ + """Validate that discovery_username and discovery_password are both set or both absent.""" has_user = bool(self.discovery_username) has_pass = bool(self.discovery_password) if has_user and not has_pass: @@ -226,11 +220,14 @@ def validate_discovery_credentials_pair(self) -> Self: ) return self - @field_validator('serial_number', 'preprovision_serial', mode='before') + @field_validator('serial_number', mode='before') @classmethod - def validate_serial_numbers(cls, v: Optional[str]) -> Optional[str]: - """Validate serial numbers are not empty strings.""" - return SwitchValidators.validate_serial_number(v) + def validate_serial_number_field(cls, v: str) -> str: + """Validate serial_number is not empty.""" + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("serial_number cannot be empty") + return result class RMAConfigModel(NDNestedModel): @@ -330,15 +327,16 @@ class SwitchConfigModel(NDBaseModel): """ Per-switch configuration entry in the Ansible playbook config list. - Supports normal switch addition, POAP (Bootstrap and Pre-provision), and RMA - operations. 
The operation type is derived from the presence of poap or rma fields. + Supports normal switch addition, POAP (Bootstrap), Pre-provision, Swap + (both poap+preprovision), and RMA operations. The operation type is derived + from the presence of poap, preprovision, and/or rma fields. """ identifiers: ClassVar[List[str]] = ["seed_ip"] # Fields excluded from diff — only seed_ip + role are compared exclude_from_diff: ClassVar[List[str]] = [ "username", "password", "auth_proto", - "preserve_config", "platform_type", "poap", "rma", + "preserve_config", "platform_type", "poap", "preprovision", "rma", "operation_type", ] @@ -381,10 +379,14 @@ class SwitchConfigModel(NDBaseModel): description="Platform type of the switch (nx-os, ios-xe, etc.)" ) - # POAP and RMA configurations - poap: Optional[List[POAPConfigModel]] = Field( + # POAP, Pre-provision and RMA configurations + poap: Optional[POAPConfigModel] = Field( default=None, - description="POAP (PowerOn Auto Provisioning) configurations for Bootstrap/Pre-provision" + description="Bootstrap POAP config (serial_number + hostname mandatory)" + ) + preprovision: Optional[PreprovisionConfigModel] = Field( + default=None, + description="Pre-provision config (serial_number, model, version, hostname, config_data all mandatory)" ) rma: Optional[List[RMAConfigModel]] = Field( default=None, @@ -395,16 +397,22 @@ class SwitchConfigModel(NDBaseModel): @computed_field @property - def operation_type(self) -> Literal["normal", "poap", "rma"]: + def operation_type(self) -> Literal["normal", "poap", "preprovision", "swap", "rma"]: """Determine the operation type from this config. Returns: - ``'poap'`` if POAP configs are present, + ``'swap'`` if both poap and preprovision are present, + ``'poap'`` if only bootstrap poap is present, + ``'preprovision'`` if only preprovision is present, ``'rma'`` if RMA configs are present, ``'normal'`` otherwise. 
""" + if self.poap and self.preprovision: + return "swap" if self.poap: return "poap" + if self.preprovision: + return "preprovision" if self.rma: return "rma" return "normal" @@ -419,22 +427,24 @@ def to_config_dict(self) -> Dict[str, Any]: return self.to_config(exclude={ "username": True, "password": True, - "poap": {"__all__": {"discovery_username": True, "discovery_password": True}}, + "poap": {"discovery_username": True, "discovery_password": True}, + "preprovision": {"discovery_username": True, "discovery_password": True}, "rma": {"__all__": {"discovery_username": True, "discovery_password": True}}, }) @model_validator(mode='after') - def reject_auth_proto_for_poap_rma(self) -> Self: - """Reject non-MD5 auth_proto when POAP or RMA is configured. - - POAP, Pre-provision, and RMA operations always use MD5 internally. - By validating mode='after', all inputs (raw strings, enum instances, - or Ansible argspec-injected defaults) have already been coerced by - Pydantic into a typed SnmpV3AuthProtocol value, so a direct enum - comparison is safe and unambiguous. + def reject_auth_proto_for_special_ops(self) -> Self: + """Reject non-MD5 auth_proto when POAP, Pre-provision, Swap or RMA is configured. + + These operations always use MD5 internally. By validating mode='after', + all inputs have already been coerced by Pydantic into a typed + SnmpV3AuthProtocol value, so a direct enum comparison is safe. """ - if (self.poap or self.rma) and self.auth_proto != SnmpV3AuthProtocol.MD5: - op = "POAP" if self.poap else "RMA" + if (self.poap or self.preprovision or self.rma) and self.auth_proto != SnmpV3AuthProtocol.MD5: + if self.poap or self.preprovision: + op = "POAP/Pre-provision" + else: + op = "RMA" raise ValueError( f"'auth_proto' must not be specified for {op} operations. 
" f"The authentication protocol is always MD5 and is set " @@ -443,26 +453,36 @@ def reject_auth_proto_for_poap_rma(self) -> Self: return self @model_validator(mode='after') - def validate_poap_rma_mutual_exclusion(self) -> Self: - """Validate that POAP and RMA are mutually exclusive.""" - if self.poap and self.rma: - raise ValueError("Cannot specify both 'poap' and 'rma' configurations for the same switch") - + def validate_special_ops_exclusion(self) -> Self: + """Validate mutually exclusive operation combinations. + + Allowed: + - poap only (Bootstrap) + - preprovision only (Pre-provision) + - poap + preprovision (Swap) + - rma (RMA) + Not allowed: + - rma combined with poap or preprovision + """ + if self.rma and (self.poap or self.preprovision): + raise ValueError( + "Cannot specify 'rma' together with 'poap' or 'preprovision' " + "for the same switch" + ) return self @model_validator(mode='after') - def validate_poap_rma_credentials(self) -> Self: - """Validate credentials for POAP and RMA operations.""" - if self.poap or self.rma: - # POAP/RMA require credentials + def validate_special_ops_credentials(self) -> Self: + """Validate credentials for POAP, Pre-provision, Swap and RMA operations.""" + if self.poap or self.preprovision or self.rma: if not self.username or not self.password: raise ValueError( - "For POAP and RMA operations, username and password are required" + "For POAP, Pre-provision, and RMA operations, username and password are required" ) - # For POAP and RMA, username should be 'admin' if self.username != "admin": - raise ValueError("For POAP and RMA operations, username should be 'admin'") - + raise ValueError( + "For POAP, Pre-provision, and RMA operations, username should be 'admin'" + ) return self @model_validator(mode='after') @@ -478,10 +498,10 @@ def apply_state_defaults(self, info: ValidationInfo) -> Self: """ state = (info.context or {}).get("state") if info else None - # POAP only allowed with merged - if self.poap and state not in 
(None, "merged"): + # POAP/Pre-provision/Swap only allowed with merged + if (self.poap or self.preprovision) and state not in (None, "merged"): raise ValueError( - f"POAP operations require 'merged' state, " + f"POAP/Pre-provision operations require 'merged' state, " f"got '{state}' (switch: {self.seed_ip})" ) @@ -538,12 +558,12 @@ def validate_seed_ip(cls, v: str) -> str: f"'{v}' is not a valid IP address and could not be resolved via DNS" ) - @field_validator('poap', 'rma', mode='before') + @field_validator('rma', mode='before') @classmethod - def validate_lists_not_empty(cls, v: Optional[List]) -> Optional[List]: - """Validate that if POAP or RMA lists are provided, they are not empty.""" + def validate_rma_list_not_empty(cls, v: Optional[List]) -> Optional[List]: + """Validate that if RMA list is provided, it is not empty.""" if v is not None and len(v) == 0: - raise ValueError("POAP/RMA list cannot be empty if provided") + raise ValueError("RMA list cannot be empty if provided") return v @field_validator('auth_proto', mode='before') @@ -627,6 +647,7 @@ def to_gathered_dict(self) -> Dict[str, Any]: result = self.to_config(exclude={ "platform_type": True, "poap": True, + "preprovision": True, "rma": True, "operation_type": True, }) @@ -653,6 +674,7 @@ def get_argument_spec(cls) -> Dict[str, Any]: __all__ = [ "ConfigDataModel", "POAPConfigModel", + "PreprovisionConfigModel", "RMAConfigModel", "SwitchConfigModel", ] diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index c41c59ee..565bdf49 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -45,6 +45,7 @@ SwitchCredentialsRequestModel, ChangeSwitchSerialNumberRequestModel, POAPConfigModel, + PreprovisionConfigModel, RMAConfigModel, ) from ansible_collections.cisco.nd.plugins.module_utils.utils.manage_switches import ( @@ -1175,28 +1176,44 @@ def handle( # Classify entries first so check mode can report 
per-operation counts bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] - preprov_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] - swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] + preprov_entries: List[Tuple[SwitchConfigModel, PreprovisionConfigModel]] = [] + swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel]] = [] for switch_cfg in proposed_config: - if not switch_cfg.poap: - log.warning( - f"Switch config for {switch_cfg.seed_ip} has no POAP block — skipping" - ) - continue - - for poap_cfg in switch_cfg.poap: - if poap_cfg.serial_number and poap_cfg.preprovision_serial: - swap_entries.append((switch_cfg, poap_cfg)) - elif poap_cfg.preprovision_serial: - preprov_entries.append((switch_cfg, poap_cfg)) - elif poap_cfg.serial_number: - bootstrap_entries.append((switch_cfg, poap_cfg)) - else: + has_poap = bool(switch_cfg.poap) + has_preprov = bool(switch_cfg.preprovision) + + if has_poap and has_preprov: + # Swap: only serial_number is meaningful on each side; warn about extras + poap_extra = [ + f for f in ["hostname", "image_policy", "discovery_username", "discovery_password"] + if getattr(switch_cfg.poap, f, None) + ] + preprov_extra = [ + f for f in ["model", "version", "hostname", "config_data", + "image_policy", "discovery_username", "discovery_password"] + if getattr(switch_cfg.preprovision, f, None) + ] + if poap_extra: + log.warning( + f"Swap ({switch_cfg.seed_ip}): extra fields in 'poap' will be " + f"ignored during swap: {poap_extra}" + ) + if preprov_extra: log.warning( - f"POAP entry for {switch_cfg.seed_ip} has neither " - f"serial_number nor preprovision_serial — skipping" + f"Swap ({switch_cfg.seed_ip}): extra fields in 'preprovision' will be " + f"ignored during swap: {preprov_extra}" ) + swap_entries.append((switch_cfg, switch_cfg.poap, switch_cfg.preprovision)) + elif has_preprov: + preprov_entries.append((switch_cfg, switch_cfg.preprovision)) + elif 
has_poap: + bootstrap_entries.append((switch_cfg, switch_cfg.poap)) + else: + log.warning( + f"Switch config for {switch_cfg.seed_ip} has no poap or preprovision " + f"block — skipping" + ) log.info( f"POAP classification: {len(bootstrap_entries)} bootstrap, " @@ -1249,14 +1266,14 @@ def handle( bootstrap_entries = active_bootstrap active_preprov = [] - for switch_cfg, poap_cfg in preprov_entries: + for switch_cfg, preprov_cfg in preprov_entries: if switch_cfg.seed_ip in existing_by_ip: log.info( f"PreProvision: IP '{switch_cfg.seed_ip}' already in fabric " f"— idempotent, skipping" ) else: - active_preprov.append((switch_cfg, poap_cfg)) + active_preprov.append((switch_cfg, preprov_cfg)) preprov_entries = active_preprov # Handle swap entries (change serial number on pre-provisioned switches) @@ -1270,8 +1287,8 @@ def handle( # Handle pre-provision entries if preprov_entries: preprov_models: List[PreProvisionSwitchModel] = [] - for switch_cfg, poap_cfg in preprov_entries: - pp_model = self._build_preprovision_model(switch_cfg, poap_cfg) + for switch_cfg, preprov_cfg in preprov_entries: + pp_model = self._build_preprovision_model(switch_cfg, preprov_cfg) preprov_models.append(pp_model) log.info( f"Built pre-provision model for serial=" @@ -1335,20 +1352,6 @@ def _handle_poap_bootstrap( log.error(msg) nd.module.fail_json(msg=msg) - # Validate user-supplied fields against bootstrap data (if provided) - # and warn about any fields that will be pulled from the API. 
- SwitchDiffEngine.validate_switch_api_fields( - nd=nd, - serial=poap_cfg.serial_number, - model=poap_cfg.model, - version=poap_cfg.version, - config_data=poap_cfg.config_data, - bootstrap_data=bootstrap_data, - log=log, - context="Bootstrap", - hostname=poap_cfg.hostname, - ) - model = self._build_bootstrap_import_model( switch_cfg, poap_cfg, bootstrap_data ) @@ -1413,20 +1416,44 @@ def _build_bootstrap_import_model( discovery_username = getattr(poap_cfg, "discovery_username", None) discovery_password = getattr(poap_cfg, "discovery_password", None) - # Use user-provided values when available; fall back to bootstrap API data. - model = poap_cfg.model or bs.get("model", "") - version = poap_cfg.version or bs.get("softwareVersion", "") - hostname = poap_cfg.hostname or bs.get("hostname", "") + # model, version and config_data always come from the bootstrap API for + # bootstrap-only operations. POAP no longer carries these fields. + model = bs.get("model", "") + version = bs.get("softwareVersion", "") gateway_ip_mask = ( - (poap_cfg.config_data.gateway if poap_cfg.config_data else None) - or bs.get("gatewayIpMask") + bs.get("gatewayIpMask") or bs_data.get("gatewayIpMask") ) - data_models = ( - (poap_cfg.config_data.models if poap_cfg.config_data else None) - or bs_data.get("models", []) - ) + data_models = bs_data.get("models", []) + + # Hostname: user-provided via poap.hostname is the default; if the + # bootstrap API returns a different value, the API wins and we warn. + user_hostname = poap_cfg.hostname + api_hostname = bs.get("hostname", "") + if api_hostname and api_hostname != user_hostname: + log.warning( + f"Bootstrap ({serial_number}): API hostname '{api_hostname}' overrides " + f"user-provided hostname '{user_hostname}'. Using API value." + ) + hostname = api_hostname + else: + hostname = user_hostname + + # Role: switch_cfg.role is user-provided; if the bootstrap API carries a + # role and it differs, the API value wins and we warn. 
+ api_role_raw = bs.get("switchRole") or bs_data.get("switchRole") + if api_role_raw: + try: + api_role = SwitchRole.normalize(api_role_raw) + if api_role and api_role != switch_role: + log.warning( + f"Bootstrap ({serial_number}): API role '{api_role_raw}' overrides " + f"user-provided role '{switch_role}'. Using API value." + ) + switch_role = api_role + except Exception: + pass # Build the data block from resolved values (replaces build_poap_data_block) data_block: Optional[Dict[str, Any]] = None @@ -1535,38 +1562,38 @@ def _import_bootstrap_switches( def _build_preprovision_model( self, switch_cfg: SwitchConfigModel, - poap_cfg: POAPConfigModel, + preprov_cfg: "PreprovisionConfigModel", ) -> PreProvisionSwitchModel: - """Build a pre-provision model from POAP configuration. + """Build a pre-provision model from PreprovisionConfigModel configuration. Args: switch_cfg: Parent switch config. - poap_cfg: POAP config entry. + preprov_cfg: Pre-provision config entry. Returns: Completed ``PreProvisionSwitchModel`` for API submission. 
""" log = self.ctx.log log.debug( - f"ENTER: _build_preprovision_model(serial={poap_cfg.preprovision_serial})" + f"ENTER: _build_preprovision_model(serial={preprov_cfg.serial_number})" ) - serial_number = poap_cfg.preprovision_serial - hostname = poap_cfg.hostname + serial_number = preprov_cfg.serial_number + hostname = preprov_cfg.hostname ip = switch_cfg.seed_ip - model_name = poap_cfg.model - version = poap_cfg.version - image_policy = poap_cfg.image_policy - gateway_ip_mask = poap_cfg.config_data.gateway if poap_cfg.config_data else None + model_name = preprov_cfg.model + version = preprov_cfg.version + image_policy = preprov_cfg.image_policy + gateway_ip_mask = preprov_cfg.config_data.gateway switch_role = switch_cfg.role password = switch_cfg.password auth_proto = SnmpV3AuthProtocol.MD5 # Pre-provision always uses MD5 - discovery_username = getattr(poap_cfg, "discovery_username", None) - discovery_password = getattr(poap_cfg, "discovery_password", None) + discovery_username = getattr(preprov_cfg, "discovery_username", None) + discovery_password = getattr(preprov_cfg, "discovery_password", None) - # Shared data block builder - data_block = build_poap_data_block(poap_cfg) + # Build data block from mandatory config_data + data_block = build_poap_data_block(preprov_cfg) preprov_model = PreProvisionSwitchModel( serialNumber=serial_number, @@ -1655,13 +1682,15 @@ def _preprovision_switches( def _handle_poap_swap( self, - swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]], + swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel, "PreprovisionConfigModel"]], existing: List[SwitchDataModel], ) -> None: """Process POAP serial-swap entries. Args: - swap_entries: ``(SwitchConfigModel, POAPConfigModel)`` swap pairs. + swap_entries: ``(SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel)`` + swap triples where poap carries the new serial and preprovision + carries the old (pre-provisioned) serial. existing: Current fabric inventory snapshot. 
Returns: @@ -1688,8 +1717,8 @@ def _handle_poap_swap( f"{list(fabric_index.keys())}" ) - for switch_cfg, poap_cfg in swap_entries: - old_serial = poap_cfg.preprovision_serial + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: + old_serial = preprov_cfg.serial_number if old_serial not in fabric_index: msg = ( f"Pre-provisioned serial '{old_serial}' not found in " @@ -1713,7 +1742,7 @@ def _handle_poap_swap( f"{list(bootstrap_index.keys())}" ) - for switch_cfg, poap_cfg in swap_entries: + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: new_serial = poap_cfg.serial_number if new_serial not in bootstrap_index: msg = ( @@ -1732,8 +1761,8 @@ def _handle_poap_swap( # ------------------------------------------------------------------ # Step 3: Call changeSwitchSerialNumber for each swap entry # ------------------------------------------------------------------ - for switch_cfg, poap_cfg in swap_entries: - old_serial = poap_cfg.preprovision_serial + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: + old_serial = preprov_cfg.serial_number new_serial = poap_cfg.serial_number log.info( @@ -1804,7 +1833,7 @@ def _handle_poap_swap( # Step 5: Build BootstrapImportSwitchModels and POST importBootstrap # ------------------------------------------------------------------ import_models: List[BootstrapImportSwitchModel] = [] - for switch_cfg, poap_cfg in swap_entries: + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: new_serial = poap_cfg.serial_number bootstrap_data = post_swap_index.get(new_serial) @@ -1844,7 +1873,7 @@ def _handle_poap_swap( # Step 6: Wait for manageability, save credentials, finalize # ------------------------------------------------------------------ switch_actions: List[Tuple[str, SwitchConfigModel]] = [] - for switch_cfg, poap_cfg in swap_entries: + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: switch_actions.append((poap_cfg.serial_number, switch_cfg)) self.fabric_ops.post_add_processing( diff --git 
a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 7037c024..1019ff05 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -107,11 +107,28 @@ default: false poap: description: - - POAP (PowerOn Auto Provisioning) configurations for bootstrap/preprovision. + - Bootstrap POAP config for the switch. + - C(serial_number) and C(hostname) are mandatory. + - Model, version, and config data are sourced from the bootstrap API at runtime. + - If the bootstrap API reports a different hostname or role, the API value + overrides the user-provided value and a warning is logged. + - To perform a B(swap) operation, provide both C(poap) and C(preprovision) + under the same switch config. Only C(serial_number) is required in each. - POAP and DHCP must be enabled in fabric before using. - type: list - elements: dict + type: dict suboptions: + serial_number: + description: + - Serial number of the physical switch to Bootstrap. + - Required for bootstrap and swap operations. + type: str + required: true + hostname: + description: + - Hostname for the switch during bootstrap. + - Overridden by the bootstrap API value when they differ (warning logged). + type: str + required: true discovery_username: description: - Username for device discovery during POAP. @@ -121,53 +138,74 @@ - Password for device discovery during POAP. type: str no_log: true - serial_number: + image_policy: description: - - Serial number of the physical switch to Bootstrap. - - When used together with C(preprovision_serial), performs a swap operation - that changes the serial number of a pre-provisioned switch and then - imports it via bootstrap. + - Name of the image policy to be applied on the switch. type: str - preprovision_serial: + preprovision: + description: + - Pre-provision config for the switch. + - All five fields are mandatory since the controller has no physical switch + to pull values from. 
+ - To perform a B(swap) operation, provide both C(poap) and C(preprovision) + under the same switch config. Only C(serial_number) is required in each; + extra fields are ignored with a warning. + - POAP and DHCP must be enabled in fabric before using. + type: dict + suboptions: + serial_number: description: - - Serial number of switch to Pre-provision. - - When used together with C(serial_number), performs a swap operation - that changes the serial number of this pre-provisioned switch to - C(serial_number) and then imports it via bootstrap. + - Serial number of the switch to Pre-provision. type: str + required: true model: description: - - Model of switch to Bootstrap/Pre-provision. + - Model of the switch to Pre-provision (e.g., N9K-C93180YC-EX). type: str + required: true version: description: - - Software version of switch. + - Software version of the switch to Pre-provision (e.g., 10.3(1)). type: str + required: true hostname: description: - - Hostname for the switch. - type: str - image_policy: - description: - - Image policy to apply. + - Hostname for the switch during pre-provision. type: str + required: true config_data: description: - - Basic configuration data for the switch during Bootstrap/Pre-provision. + - Basic configuration data for the switch during Pre-provision. - C(models) and C(gateway) are mandatory. - - C(models) is list of model of modules in switch to Bootstrap/Pre-provision. + - C(models) is a list of module models in the switch. - C(gateway) is the gateway IP with mask for the switch. type: dict + required: true suboptions: models: description: - - List of module models in the switch (e.g., N9K-X9364v, N9K-vSUP). + - List of module models in the switch (e.g., [N9K-X9364v, N9K-vSUP]). type: list elements: str + required: true gateway: description: - Gateway IP with subnet mask (e.g., 192.168.0.1/24). type: str + required: true + discovery_username: + description: + - Username for device discovery during pre-provision. 
+ type: str + discovery_password: + description: + - Password for device discovery during pre-provision. + type: str + no_log: true + image_policy: + description: + - Image policy to apply during pre-provision. + type: str rma: description: - RMA an existing switch with a new one. @@ -243,9 +281,9 @@ treated as idempotent and will attempt the bootstrap again. - Idempotence for B(Pre-provision) - A pre-provision entry is considered idempotent when the C(seed_ip) already exists in the fabric inventory, regardless of the - C(preprovision_serial) value. Because the pre-provision serial is a placeholder - that may differ from the real hardware serial, only the IP address is used as - the stable identity for idempotency checks. + C(serial_number) value under C(preprovision). Because the pre-provision serial is + a placeholder that may differ from the real hardware serial, only the IP address + is used as the stable identity for idempotency checks. - Idempotence for B(normal discovery) - A switch is considered idempotent when its C(seed_ip) already exists in the fabric inventory with no configuration drift (same role). 
@@ -284,12 +322,15 @@ - seed_ip: 192.168.10.1 username: admin password: "{{ switch_password }}" - poap: - - preprovision_serial: SAL1234ABCD - model: N9K-C93180YC-EX - version: "10.3(1)" - hostname: leaf-preprov - gateway_ip: 192.168.10.1/24 + preprovision: + serial_number: SAL1234ABCD + model: N9K-C93180YC-EX + version: "10.3(1)" + hostname: leaf-preprov + config_data: + models: + - N9K-C93180YC-EX + gateway: 192.168.10.1/24 state: merged - name: Bootstrap a switch via POAP @@ -300,11 +341,8 @@ username: admin password: "{{ switch_password }}" poap: - - serial_number: SAL5678EFGH - model: N9K-C93180YC-EX - version: "10.3(1)" - hostname: leaf-bootstrap - gateway_ip: 192.168.10.1/24 + serial_number: SAL5678EFGH + hostname: leaf-bootstrap state: merged - name: Swap serial number on a pre-provisioned switch (POAP swap) @@ -315,8 +353,9 @@ username: admin password: "{{ switch_password }}" poap: - - serial_number: SAL5678EFGH - preprovision_serial: SAL1234ABCD + serial_number: SAL5678EFGH + preprovision: + serial_number: SAL1234ABCD state: merged - name: RMA - Replace a switch diff --git a/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 index 94af1f1b..9fbc38ce 100644 --- a/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 +++ b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 @@ -19,34 +19,46 @@ {% if switch.role is defined %} {% set _ = switch_item.update({'role': switch.role | default('') }) %} {% endif %} -{% if switch.poap is defined %} -{% for sw_poap_item in switch.poap %} +{% if switch.poap is defined and switch.poap %} {% set poap_item = {} %} -{% if sw_poap_item.preprovision_serial is defined and sw_poap_item.preprovision_serial %} -{% set _ = poap_item.update({'preprovision_serial': sw_poap_item.preprovision_serial}) %} +{% set _ = poap_item.update({'serial_number': 
switch.poap.serial_number}) %} +{% set _ = poap_item.update({'hostname': switch.poap.hostname}) %} +{% if switch.poap.image_policy is defined and switch.poap.image_policy %} +{% set _ = poap_item.update({'image_policy': switch.poap.image_policy}) %} {% endif %} -{% if sw_poap_item.serial_number is defined and sw_poap_item.serial_number %} -{% set _ = poap_item.update({'serial_number': sw_poap_item.serial_number}) %} +{% if switch.poap.discovery_username is defined and switch.poap.discovery_username %} +{% set _ = poap_item.update({'discovery_username': switch.poap.discovery_username}) %} {% endif %} -{% if sw_poap_item.model is defined and sw_poap_item.model %} -{% set _ = poap_item.update({'model': sw_poap_item.model}) %} +{% if switch.poap.discovery_password is defined and switch.poap.discovery_password %} +{% set _ = poap_item.update({'discovery_password': switch.poap.discovery_password}) %} {% endif %} -{% if sw_poap_item.version is defined and sw_poap_item.version %} -{% set _ = poap_item.update({'version': sw_poap_item.version}) %} +{% set _ = switch_item.update({'poap': poap_item}) %} {% endif %} -{% if sw_poap_item.hostname is defined and sw_poap_item.hostname %} -{% set _ = poap_item.update({'hostname': sw_poap_item.hostname}) %} -{% endif %} -{% if sw_poap_item.config_data is defined %} -{% set poap_config_item = {} %} -{% for sw_poap_config_item in sw_poap_item.config_data %} -{% set _ = poap_config_item.update({sw_poap_config_item: sw_poap_item.config_data[sw_poap_config_item]}) %} +{% if switch.preprovision is defined and switch.preprovision %} +{% set preprov_item = {} %} +{% set _ = preprov_item.update({'serial_number': switch.preprovision.serial_number}) %} +{% set _ = preprov_item.update({'model': switch.preprovision.model}) %} +{% set _ = preprov_item.update({'version': switch.preprovision.version}) %} +{% set _ = preprov_item.update({'hostname': switch.preprovision.hostname}) %} +{% if switch.preprovision.config_data is defined %} +{% set 
preprov_config = {} %} +{% for k in switch.preprovision.config_data %} +{% set _ = preprov_config.update({k: switch.preprovision.config_data[k]}) %} {% endfor %} -{% set _ = poap_item.update({'config_data': poap_config_item}) %} +{% set _ = preprov_item.update({'config_data': preprov_config}) %} {% endif %} -{% set _ = switch_item.update({'poap': [poap_item]}) %} -{% endfor %} -{% else %} +{% if switch.preprovision.image_policy is defined and switch.preprovision.image_policy %} +{% set _ = preprov_item.update({'image_policy': switch.preprovision.image_policy}) %} +{% endif %} +{% if switch.preprovision.discovery_username is defined and switch.preprovision.discovery_username %} +{% set _ = preprov_item.update({'discovery_username': switch.preprovision.discovery_username}) %} +{% endif %} +{% if switch.preprovision.discovery_password is defined and switch.preprovision.discovery_password %} +{% set _ = preprov_item.update({'discovery_password': switch.preprovision.discovery_password}) %} +{% endif %} +{% set _ = switch_item.update({'preprovision': preprov_item}) %} +{% endif %} +{% if switch.poap is not defined and switch.preprovision is not defined %} {% if switch.auth_proto is defined %} {% set _ = switch_item.update({'auth_proto': switch.auth_proto | default('') }) %} {% endif %} diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml index 62c3bd98..4b569004 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml @@ -121,14 +121,14 @@ username: '{{ switch_username }}' password: '{{ switch_password }}' role: border - poap: - - preprovision_serial: "{{ test_data.sw2_serial }}" - model: "{{ test_data.poap_model }}" - version: "{{ test_data.poap_version }}" - hostname: "{{ test_data.prepro_hostname }}" - config_data: - models: "{{ test_data.poap_configmodel }}" - gateway: "{{ 
test_data.poap_gateway }}" + preprovision: + serial_number: "{{ test_data.sw2_serial }}" + model: "{{ test_data.poap_model }}" + version: "{{ test_data.poap_version }}" + hostname: "{{ test_data.prepro_hostname }}" + config_data: + models: "{{ test_data.poap_configmodel }}" + gateway: "{{ test_data.poap_gateway }}" when: poap_enabled == True delegate_to: localhost tags: poap @@ -187,15 +187,12 @@ ansible.builtin.set_fact: switch_conf: - seed_ip: "{{ test_data.sw1 }}" + username: '{{ switch_username }}' + password: '{{ switch_password }}' role: leaf poap: - - serial_number: "{{ test_data.sw1_serial }}" - model: "{{ test_data.poap_model }}" - version: "{{ test_data.poap_version }}" - hostname: "{{ test_data.poap_hostname }}" - config_data: - models: "{{ test_data.poap_configmodel }}" - gateway: "{{ test_data.poap_gateway }}" + serial_number: "{{ test_data.sw1_serial }}" + hostname: "{{ test_data.poap_hostname }}" - seed_ip: "{{ test_data.sw3 }}" auth_proto: MD5 role: spine From c53e6fccc086bab3a6848bb13e1b233dd1a7fb84 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 27 Mar 2026 00:33:14 +0530 Subject: [PATCH 071/109] NDOutput Integration --- plugins/module_utils/nd_switch_resources.py | 37 +++++++++++++++++---- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/nd_switch_resources.py index 565bdf49..ec62900b 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ b/plugins/module_utils/nd_switch_resources.py @@ -24,6 +24,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection +from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results from 
ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches import ( SwitchRole, @@ -2355,7 +2356,8 @@ def __init__( response_data=self._query_all_switches(), model_class=SwitchDataModel, ) - self.previous: NDConfigCollection = self.existing.copy() + self.before: NDConfigCollection = self.existing.copy() + self.sent: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) except Exception as e: msg = ( f"Failed to query fabric '{self.fabric}' inventory " @@ -2366,6 +2368,8 @@ def __init__( # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] + self.output: NDOutput = NDOutput(output_level=self.module.params.get("output_level", "normal")) + self.output.assign(before=self.before, after=self.existing) # Utility instances (SwitchWaitUtils / FabricUtils depend on self) self.fabric_utils = FabricUtils(self.nd, self.fabric, log) @@ -2406,16 +2410,17 @@ def exit_json(self) -> None: ) self.log.error(msg) self.nd.module.fail_json(msg=msg) - final["gathered"] = gathered + self.output.assign(after=self.existing) + final.update(self.output.format(gathered=gathered)) else: # Re-query the fabric to get the actual post-operation inventory so - # that "current" reflects real state rather than the pre-op snapshot. + # that "after" reflects real state rather than the pre-op snapshot. 
if True not in self.results.failed and not self.nd.module.check_mode: self.existing = NDConfigCollection.from_api_response( response_data=self._query_all_switches(), model_class=SwitchDataModel ) - final["previous"] = self.previous.to_ansible_config() - final["current"] = self.existing.to_ansible_config() + self.output.assign(after=self.existing, diff=self.sent) + final.update(self.output.format()) if True in self.results.failed: self.nd.module.fail_json(**final) @@ -2464,9 +2469,14 @@ def manage_state(self) -> None: self.config, self.state, self.nd, self.log ) # Partition configs by operation type - poap_configs = [c for c in proposed_config if c.operation_type == "poap"] + poap_configs = [c for c in proposed_config if c.operation_type in ("poap", "preprovision", "swap")] rma_configs = [c for c in proposed_config if c.operation_type == "rma"] - normal_configs = [c for c in proposed_config if c.operation_type not in ("poap", "rma")] + normal_configs = [c for c in proposed_config if c.operation_type == "normal"] + # Capture all proposed configs for NDOutput + output_proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchConfigModel) + for cfg in proposed_config: + output_proposed.add(cfg) + self.output.assign(proposed=output_proposed) self.log.info( f"Config partition: {len(normal_configs)} normal, " @@ -2584,6 +2594,7 @@ def _handle_merged_state( # Collect (serial_number, SwitchConfigModel) pairs for post-processing switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + _bulk_added_ips: set = set() # Phase 4: Bulk add new switches to fabric if switches_to_add and discovered_data: @@ -2622,6 +2633,7 @@ def _handle_merged_state( platform_type=platform_type, preserve_config=preserve_config, ) + _bulk_added_ips.update(cfg.seed_ip for cfg, _ in pairs) for cfg, disc in pairs: sn = disc.get("serialNumber") @@ -2631,6 +2643,13 @@ def _handle_merged_state( # Phase 5: Collect migration switches for post-processing # Migration mode switches get role updates 
during post-add processing. + # Track newly added switches in self.sent + if switches_to_add: + _sw_by_ip = {sw.fabric_management_ip: sw for sw in switches_to_add} + for ip in _bulk_added_ips: + sw_data = _sw_by_ip.get(ip) + if sw_data: + self.sent.add(sw_data) have_migration_switches = False if migration_switches: @@ -2826,6 +2845,8 @@ def _handle_overridden_state( ) self.log.error(msg) self.nd.module.fail_json(msg=msg) + for sw in switches_to_delete: + self.sent.add(sw) diff["to_update"] = [] @@ -2941,6 +2962,8 @@ def _handle_deleted_state( f"Proceeding to delete {len(switches_to_delete)} switch(es) from fabric" ) self.fabric_ops.bulk_delete(switches_to_delete) + for sw in switches_to_delete: + self.sent.add(sw) self.log.debug("EXIT: _handle_deleted_state()") # ===================================================================== From 91a05c6d66f1080702e77aca07f864fa75b3455a Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 27 Mar 2026 01:10:29 +0530 Subject: [PATCH 072/109] Utils restructuring --- .../module_utils/manage_switches/__init__.py | 34 ++ .../nd_switch_resources.py | 6 +- .../utils.py} | 317 +++++++++++++++++- plugins/module_utils/utils.py | 175 +++++++++- .../utils/manage_switches/__init__.py | 46 --- .../utils/manage_switches/bootstrap_utils.py | 111 ------ .../utils/manage_switches/exceptions.py | 20 -- .../utils/manage_switches/fabric_utils.py | 179 ---------- .../utils/manage_switches/payload_utils.py | 90 ----- .../utils/manage_switches/switch_helpers.py | 138 -------- plugins/modules/nd_manage_switches.py | 2 +- 11 files changed, 526 insertions(+), 592 deletions(-) create mode 100644 plugins/module_utils/manage_switches/__init__.py rename plugins/module_utils/{ => manage_switches}/nd_switch_resources.py (99%) rename plugins/module_utils/{utils/manage_switches/switch_wait_utils.py => manage_switches/utils.py} (70%) delete mode 100644 plugins/module_utils/utils/manage_switches/__init__.py delete mode 100644 
plugins/module_utils/utils/manage_switches/bootstrap_utils.py delete mode 100644 plugins/module_utils/utils/manage_switches/exceptions.py delete mode 100644 plugins/module_utils/utils/manage_switches/fabric_utils.py delete mode 100644 plugins/module_utils/utils/manage_switches/payload_utils.py delete mode 100644 plugins/module_utils/utils/manage_switches/switch_helpers.py diff --git a/plugins/module_utils/manage_switches/__init__.py b/plugins/module_utils/manage_switches/__init__.py new file mode 100644 index 00000000..aa6dfd90 --- /dev/null +++ b/plugins/module_utils/manage_switches/__init__.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""nd_manage_switches package. + +Re-exports the orchestrator and utility classes so that consumers can +import directly from the package. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.nd_switch_resources import ( # noqa: F401 + NDSwitchResourceModule, +) +from ansible_collections.cisco.nd.plugins.module_utils.utils import ( # noqa: F401 + SwitchOperationError, + FabricUtils, +) +from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.utils import ( # noqa: F401 + PayloadUtils, + SwitchWaitUtils, + mask_password, + get_switch_field, + determine_operation_type, + group_switches_by_credentials, + query_bootstrap_switches, + build_bootstrap_index, + build_poap_data_block, +) diff --git a/plugins/module_utils/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py similarity index 99% rename from plugins/module_utils/nd_switch_resources.py rename to plugins/module_utils/manage_switches/nd_switch_resources.py index ec62900b..6b9a1b99 100644 --- a/plugins/module_utils/nd_switch_resources.py +++ 
b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -49,10 +49,12 @@ PreprovisionConfigModel, RMAConfigModel, ) -from ansible_collections.cisco.nd.plugins.module_utils.utils.manage_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.utils import ( FabricUtils, - SwitchWaitUtils, SwitchOperationError, +) +from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.utils import ( + SwitchWaitUtils, mask_password, get_switch_field, group_switches_by_credentials, diff --git a/plugins/module_utils/utils/manage_switches/switch_wait_utils.py b/plugins/module_utils/manage_switches/utils.py similarity index 70% rename from plugins/module_utils/utils/manage_switches/switch_wait_utils.py rename to plugins/module_utils/manage_switches/utils.py index 2d6e281d..ed47393c 100644 --- a/plugins/module_utils/utils/manage_switches/switch_wait_utils.py +++ b/plugins/module_utils/manage_switches/utils.py @@ -4,7 +4,10 @@ # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -"""Multi-phase wait utilities for switch lifecycle operations.""" +"""Utility helpers for nd_manage_switches: exceptions, fabric operations, +payload construction, credential grouping, bootstrap queries, and +multi-phase switch wait utilities. 
+""" from __future__ import absolute_import, division, print_function @@ -12,8 +15,12 @@ import logging import time -from typing import Any, Dict, List, Optional +from copy import deepcopy +from typing import Any, Dict, List, Optional, Tuple, Union +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_bootstrap import ( + EpManageFabricsBootstrapGet, +) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_inventory import ( EpManageFabricsInventoryDiscoverGet, ) @@ -23,8 +30,300 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( EpManageFabricsSwitchActionsRediscoverPost, ) +from ansible_collections.cisco.nd.plugins.module_utils.utils import ( + FabricUtils, + SwitchOperationError, +) + + +# ========================================================================= +# Payload Utilities +# ========================================================================= + + +def mask_password(payload: Dict[str, Any]) -> Dict[str, Any]: + """Return a deep copy of *payload* with password fields masked. + + Useful for safe logging of API payloads that contain credentials. + + Args: + payload: API payload dict (may contain ``password`` keys). + + Returns: + Copy with every ``password`` value replaced by ``"********"``. + """ + masked = deepcopy(payload) + if "password" in masked: + masked["password"] = "********" + if isinstance(masked.get("switches"), list): + for switch in masked["switches"]: + if isinstance(switch, dict) and "password" in switch: + switch["password"] = "********" + return masked + + +class PayloadUtils: + """Stateless helper for building ND Switch Resource API request payloads.""" + + def __init__(self, logger: Optional[logging.Logger] = None): + """Initialize PayloadUtils. + + Args: + logger: Optional logger; defaults to ``nd.PayloadUtils``. 
+ """ + self.log = logger or logging.getLogger("nd.PayloadUtils") + + def build_credentials_payload( + self, + serial_numbers: List[str], + username: str, + password: str, + ) -> Dict[str, Any]: + """Build payload for saving switch credentials. + + Args: + serial_numbers: Switch serial numbers. + username: Switch username. + password: Switch password. -from .fabric_utils import FabricUtils + Returns: + Credentials API payload dict. + """ + return { + "switchIds": serial_numbers, + "username": username, + "password": password, + } + + def build_switch_ids_payload( + self, + serial_numbers: List[str], + ) -> Dict[str, Any]: + """Build payload with switch IDs for remove / batch operations. + + Args: + serial_numbers: Switch serial numbers. + + Returns: + ``{"switchIds": [...]}`` payload dict. + """ + return {"switchIds": serial_numbers} + + +# ========================================================================= +# Switch Helpers +# ========================================================================= + + +def get_switch_field( + switch, + field_names: List[str], +) -> Optional[Any]: + """Extract a field value from a switch config, trying multiple names. + + Supports Pydantic models and plain dicts with both snake_case and + camelCase key lookups. + + Args: + switch: Switch model or dict to extract from. + field_names: Candidate field names to try, in priority order. + + Returns: + First non-``None`` value found, or ``None``. 
+ """ + for name in field_names: + if hasattr(switch, name): + value = getattr(switch, name) + if value is not None: + return value + elif isinstance(switch, dict): + if name in switch and switch[name] is not None: + return switch[name] + # Try camelCase variant + camel = ''.join( + word.capitalize() if i > 0 else word + for i, word in enumerate(name.split('_')) + ) + if camel in switch and switch[camel] is not None: + return switch[camel] + return None + + +def determine_operation_type(switch) -> str: + """Determine the operation type from switch configuration. + + Args: + switch: A ``SwitchConfigModel``, ``SwitchDiscoveryModel``, + or raw dict. + + Returns: + ``'normal'``, ``'poap'``, or ``'rma'``. + """ + # Pydantic model with .operation_type attribute + if hasattr(switch, 'operation_type'): + return switch.operation_type + + if isinstance(switch, dict): + if 'poap' in switch or 'bootstrap' in switch: + return 'poap' + if ( + 'rma' in switch + or 'old_serial' in switch + or 'oldSerial' in switch + ): + return 'rma' + + return 'normal' + + +def group_switches_by_credentials( + switches, + log: logging.Logger, +) -> Dict[Tuple, list]: + """Group switches by shared credentials for bulk API operations. + + Args: + switches: Validated ``SwitchConfigModel`` instances. + log: Logger. + + Returns: + Dict mapping a ``(username, password_hash, auth_proto, + platform_type, preserve_config)`` tuple to the list of switches + sharing those credentials. 
+ """ + groups: Dict[Tuple, list] = {} + + for switch in switches: + password_hash = hash(switch.password) + group_key = ( + switch.username, + password_hash, + switch.auth_proto, + switch.platform_type, + switch.preserve_config, + ) + groups.setdefault(group_key, []).append(switch) + + log.info( + f"Grouped {len(switches)} switches into " + f"{len(groups)} credential group(s)" + ) + + for idx, (key, group_switches) in enumerate(groups.items(), 1): + username, _, auth_proto, platform_type, preserve_config = key + auth_value = ( + auth_proto.value + if hasattr(auth_proto, 'value') + else str(auth_proto) + ) + platform_value = ( + platform_type.value + if hasattr(platform_type, 'value') + else str(platform_type) + ) + log.debug( + f"Group {idx}: {len(group_switches)} switches with " + f"username={username}, auth={auth_value}, " + f"platform={platform_value}, " + f"preserve_config={preserve_config}" + ) + + return groups + + +# ========================================================================= +# Bootstrap Utilities +# ========================================================================= + + +def query_bootstrap_switches( + nd, + fabric: str, + log: logging.Logger, +) -> List[Dict[str, Any]]: + """GET switches currently in the bootstrap (POAP / PnP) loop. + + Args: + nd: NDModule instance (REST client). + fabric: Fabric name. + log: Logger. + + Returns: + List of raw switch dicts from the bootstrap API. 
+ """ + log.debug("ENTER: query_bootstrap_switches()") + + endpoint = EpManageFabricsBootstrapGet() + endpoint.fabric_name = fabric + log.debug(f"Bootstrap endpoint: {endpoint.path}") + + try: + result = nd.request( + path=endpoint.path, verb=endpoint.verb, + ) + except Exception as e: + msg = ( + f"Failed to query bootstrap switches for " + f"fabric '{fabric}': {e}" + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + if isinstance(result, dict): + switches = result.get("switches", []) + elif isinstance(result, list): + switches = result + else: + switches = [] + + log.info( + f"Bootstrap API returned {len(switches)} " + f"switch(es) in POAP loop" + ) + log.debug("EXIT: query_bootstrap_switches()") + return switches + + +def build_bootstrap_index( + bootstrap_switches: List[Dict[str, Any]], +) -> Dict[str, Dict[str, Any]]: + """Build a serial-number-keyed index from bootstrap API data. + + Args: + bootstrap_switches: Raw switch dicts from the bootstrap API. + + Returns: + Dict mapping ``serial_number`` -> switch dict. + """ + return { + sw.get("serialNumber", sw.get("serial_number", "")): sw + for sw in bootstrap_switches + } + + +def build_poap_data_block(poap_cfg) -> Optional[Dict[str, Any]]: + """Build optional data block for bootstrap and pre-provision models. + + Args: + poap_cfg: ``POAPConfigModel`` from the user playbook. + + Returns: + Data block dict, or ``None`` if no ``config_data`` is present. + """ + if not poap_cfg.config_data: + return None + data_block: Dict[str, Any] = {} + gateway = poap_cfg.config_data.gateway + if gateway: + data_block["gatewayIpMask"] = gateway + if poap_cfg.config_data.models: + data_block["models"] = poap_cfg.config_data.models + return data_block or None + + +# ========================================================================= +# Switch Wait Utilities +# ========================================================================= class SwitchWaitUtils: @@ -80,7 +379,7 @@ def __init__( fabric: Fabric name. 
logger: Optional logger; defaults to ``nd.SwitchWaitUtils``. max_attempts: Max polling iterations (default ``300``). - wait_interval: Seconds between polls (default ``5``). + wait_interval: Override interval in seconds (default ``5``). fabric_utils: Optional ``FabricUtils`` instance for fabric info queries. Created internally if not provided. """ @@ -680,5 +979,15 @@ def _is_greenfield_debug_enabled(self) -> bool: __all__ = [ + "SwitchOperationError", + "PayloadUtils", + "FabricUtils", "SwitchWaitUtils", + "mask_password", + "get_switch_field", + "determine_operation_type", + "group_switches_by_credentials", + "query_bootstrap_switches", + "build_bootstrap_index", + "build_poap_data_block", ] diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 7d05e4af..44a55195 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -4,8 +4,18 @@ from __future__ import absolute_import, division, print_function +import logging +import time from copy import deepcopy -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Optional, Union + +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricConfigDeployPost, + EpManageFabricGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( + EpManageFabricsActionsConfigSavePost, +) def sanitize_dict(dict_to_sanitize, keys=None, values=None, recursive=True, remove_none_values=True): @@ -76,3 +86,166 @@ def remove_unwanted_keys(data: Dict, unwanted_keys: List[Union[str, List[str]]]) pass return data + + +# ========================================================================= +# Exceptions +# ========================================================================= + + +class SwitchOperationError(Exception): + """Raised when a switch operation fails.""" + + +# ========================================================================= +# Fabric 
Utilities +# ========================================================================= + + +class FabricUtils: + """Fabric-level operations: config save, deploy, and info retrieval.""" + + def __init__( + self, + nd_module, + fabric: str, + logger: Optional[logging.Logger] = None, + ): + """Initialize FabricUtils. + + Args: + nd_module: NDModule or NDNetworkResourceModule instance. + fabric: Fabric name. + logger: Optional logger; defaults to ``nd.FabricUtils``. + """ + self.nd = nd_module + self.fabric = fabric + self.log = logger or logging.getLogger("nd.FabricUtils") + + # Pre-configure endpoints + self.ep_config_save = EpManageFabricsActionsConfigSavePost() + self.ep_config_save.fabric_name = fabric + + self.ep_config_deploy = EpManageFabricConfigDeployPost() + self.ep_config_deploy.fabric_name = fabric + + self.ep_fabric_get = EpManageFabricGet() + self.ep_fabric_get.fabric_name = fabric + + # ----------------------------------------------------------------- + # Public API + # ----------------------------------------------------------------- + + def save_config( + self, + max_retries: int = 3, + retry_delay: int = 600, + ) -> Dict[str, Any]: + """Save (recalculate) fabric configuration. + + Retries up to ``max_retries`` times with ``retry_delay`` seconds + between attempts. + + Args: + max_retries: Maximum number of attempts (default ``3``). + retry_delay: Seconds to wait between failed attempts + (default ``600``). + + Returns: + API response dict from the first successful attempt. + + Raises: + SwitchOperationError: If all attempts fail. 
+ """ + last_error: Exception = SwitchOperationError( + f"Config save produced no attempts for fabric {self.fabric}" + ) + for attempt in range(1, max_retries + 1): + try: + response = self._request_endpoint( + self.ep_config_save, action="Config save" + ) + self.log.info( + f"Config save succeeded on attempt " + f"{attempt}/{max_retries} for fabric {self.fabric}" + ) + return response + except SwitchOperationError as exc: + last_error = exc + self.log.warning( + f"Config save attempt {attempt}/{max_retries} failed " + f"for fabric {self.fabric}: {exc}" + ) + if attempt < max_retries: + self.log.info( + f"Retrying config save in {retry_delay}s " + f"(attempt {attempt + 1}/{max_retries})" + ) + time.sleep(retry_delay) + raise SwitchOperationError( + f"Config save failed after {max_retries} attempt(s) " + f"for fabric {self.fabric}: {last_error}" + ) + + def deploy_config(self) -> Dict[str, Any]: + """Deploy pending configuration to all switches in the fabric. + + The ``configDeploy`` endpoint requires no request body; it deploys + all pending changes for the fabric. + + Returns: + API response dict. + + Raises: + SwitchOperationError: If the deploy request fails. + """ + return self._request_endpoint( + self.ep_config_deploy, action="Config deploy" + ) + + def get_fabric_info(self) -> Dict[str, Any]: + """Retrieve fabric information. + + Returns: + Fabric information dict. + + Raises: + SwitchOperationError: If the request fails. + """ + return self._request_endpoint( + self.ep_fabric_get, action="Get fabric info" + ) + + # ----------------------------------------------------------------- + # Internal helpers + # ----------------------------------------------------------------- + + def _request_endpoint( + self, endpoint, action: str = "Request" + ) -> Dict[str, Any]: + """Execute a request against a pre-configured endpoint. + + Args: + endpoint: Endpoint object with ``.path`` and ``.verb``. + action: Human-readable label for log messages. 
+ + Returns: + API response dict. + + Raises: + SwitchOperationError: On any request failure. + """ + self.log.info(f"{action} for fabric: {self.fabric}") + try: + response = self.nd.request(endpoint.path, verb=endpoint.verb) + self.log.info( + f"{action} completed for fabric: {self.fabric}" + ) + return response + except Exception as e: + self.log.error( + f"{action} failed for fabric {self.fabric}: {e}" + ) + raise SwitchOperationError( + f"{action} failed for fabric {self.fabric}: {e}" + ) from e diff --git a/plugins/module_utils/utils/manage_switches/__init__.py b/plugins/module_utils/utils/manage_switches/__init__.py deleted file mode 100644 index bb142fe1..00000000 --- a/plugins/module_utils/utils/manage_switches/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""nd_manage_switches utilities package. - -Re-exports all utility classes, functions, and exceptions so that -consumers can import directly from the package: - -""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -from ansible_collections.cisco.nd.plugins.module_utils.utils.manage_switches.exceptions import SwitchOperationError # noqa: F401 -from .payload_utils import PayloadUtils, mask_password # noqa: F401 -from .fabric_utils import FabricUtils # noqa: F401 -from .switch_wait_utils import SwitchWaitUtils # noqa: F401 -from .switch_helpers import ( # noqa: F401 - get_switch_field, - determine_operation_type, - group_switches_by_credentials, -) -from .bootstrap_utils import ( # noqa: F401 - query_bootstrap_switches, - build_bootstrap_index, - build_poap_data_block, -) - - -__all__ = [ - "SwitchOperationError", - "PayloadUtils", - "FabricUtils", - "SwitchWaitUtils", - "mask_password", - "get_switch_field", - "determine_operation_type", - "group_switches_by_credentials", - 
"query_bootstrap_switches", - "build_bootstrap_index", - "build_poap_data_block", -] diff --git a/plugins/module_utils/utils/manage_switches/bootstrap_utils.py b/plugins/module_utils/utils/manage_switches/bootstrap_utils.py deleted file mode 100644 index d78d2531..00000000 --- a/plugins/module_utils/utils/manage_switches/bootstrap_utils.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -"""Bootstrap API helpers for POAP switch queries, serial-number indexing, and payload construction.""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import logging -from typing import Any, Dict, List, Optional - -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_bootstrap import ( - EpManageFabricsBootstrapGet, -) - - -def query_bootstrap_switches( - nd, - fabric: str, - log: logging.Logger, -) -> List[Dict[str, Any]]: - """GET switches currently in the bootstrap (POAP / PnP) loop. - - Args: - nd: NDModule instance (REST client). - fabric: Fabric name. - log: Logger. - - Returns: - List of raw switch dicts from the bootstrap API. 
- """ - log.debug("ENTER: query_bootstrap_switches()") - - endpoint = EpManageFabricsBootstrapGet() - endpoint.fabric_name = fabric - log.debug(f"Bootstrap endpoint: {endpoint.path}") - - try: - result = nd.request( - path=endpoint.path, verb=endpoint.verb, - ) - except Exception as e: - msg = ( - f"Failed to query bootstrap switches for " - f"fabric '{fabric}': {e}" - ) - log.error(msg) - nd.module.fail_json(msg=msg) - - if isinstance(result, dict): - switches = result.get("switches", []) - elif isinstance(result, list): - switches = result - else: - switches = [] - - log.info( - f"Bootstrap API returned {len(switches)} " - f"switch(es) in POAP loop" - ) - log.debug("EXIT: query_bootstrap_switches()") - return switches - - -def build_bootstrap_index( - bootstrap_switches: List[Dict[str, Any]], -) -> Dict[str, Dict[str, Any]]: - """Build a serial-number-keyed index from bootstrap API data. - - Args: - bootstrap_switches: Raw switch dicts from the bootstrap API. - - Returns: - Dict mapping ``serial_number`` -> switch dict. - """ - return { - sw.get("serialNumber", sw.get("serial_number", "")): sw - for sw in bootstrap_switches - } - - -def build_poap_data_block(poap_cfg) -> Optional[Dict[str, Any]]: - """Build optional data block for bootstrap and pre-provision models. - - Args: - poap_cfg: ``POAPConfigModel`` from the user playbook. - - Returns: - Data block dict, or ``None`` if no ``config_data`` is present. 
- """ - if not poap_cfg.config_data: - return None - data_block: Dict[str, Any] = {} - gateway = poap_cfg.config_data.gateway - if gateway: - data_block["gatewayIpMask"] = gateway - if poap_cfg.config_data.models: - data_block["models"] = poap_cfg.config_data.models - return data_block or None - - -__all__ = [ - "query_bootstrap_switches", - "build_bootstrap_index", - "build_poap_data_block", -] diff --git a/plugins/module_utils/utils/manage_switches/exceptions.py b/plugins/module_utils/utils/manage_switches/exceptions.py deleted file mode 100644 index 8e5b0055..00000000 --- a/plugins/module_utils/utils/manage_switches/exceptions.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""Custom exceptions for ND Switch Resource operations.""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -class SwitchOperationError(Exception): - """Raised when a switch operation fails.""" - - -__all__ = [ - "SwitchOperationError", -] diff --git a/plugins/module_utils/utils/manage_switches/fabric_utils.py b/plugins/module_utils/utils/manage_switches/fabric_utils.py deleted file mode 100644 index ab4557da..00000000 --- a/plugins/module_utils/utils/manage_switches/fabric_utils.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""Fabric-level operations: config save, deploy, and info retrieval.""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import logging -import time -from typing import Any, Dict, Optional - -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( - EpManageFabricConfigDeployPost, - EpManageFabricGet, -) -from 
ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( - EpManageFabricsActionsConfigSavePost, -) - -from .exceptions import SwitchOperationError - - -class FabricUtils: - """Fabric-level operations: config save, deploy, and info retrieval.""" - - def __init__( - self, - nd_module, - fabric: str, - logger: Optional[logging.Logger] = None, - ): - """Initialize FabricUtils. - - Args: - nd_module: NDModule or NDNetworkResourceModule instance. - fabric: Fabric name. - logger: Optional logger; defaults to ``nd.FabricUtils``. - """ - self.nd = nd_module - self.fabric = fabric - self.log = logger or logging.getLogger("nd.FabricUtils") - - # Pre-configure endpoints - self.ep_config_save = EpManageFabricsActionsConfigSavePost() - self.ep_config_save.fabric_name = fabric - - self.ep_config_deploy = EpManageFabricConfigDeployPost() - self.ep_config_deploy.fabric_name = fabric - - self.ep_fabric_get = EpManageFabricGet() - self.ep_fabric_get.fabric_name = fabric - - # ----------------------------------------------------------------- - # Public API - # ----------------------------------------------------------------- - - def save_config( - self, - max_retries: int = 3, - retry_delay: int = 600, - ) -> Dict[str, Any]: - """Save (recalculate) fabric configuration. - - Retries up to ``max_retries`` times with ``retry_delay`` seconds - between attempts. - - Args: - max_retries: Maximum number of attempts (default ``3``). - retry_delay: Seconds to wait between failed attempts - (default ``600``). - - Returns: - API response dict from the first successful attempt. - - Raises: - SwitchOperationError: If all attempts fail. 
- """ - last_error: Exception = SwitchOperationError( - f"Config save produced no attempts for fabric {self.fabric}" - ) - for attempt in range(1, max_retries + 1): - try: - response = self._request_endpoint( - self.ep_config_save, action="Config save" - ) - self.log.info( - f"Config save succeeded on attempt " - f"{attempt}/{max_retries} for fabric {self.fabric}" - ) - return response - except SwitchOperationError as exc: - last_error = exc - self.log.warning( - f"Config save attempt {attempt}/{max_retries} failed " - f"for fabric {self.fabric}: {exc}" - ) - if attempt < max_retries: - self.log.info( - f"Retrying config save in {retry_delay}s " - f"(attempt {attempt + 1}/{max_retries})" - ) - time.sleep(retry_delay) - raise SwitchOperationError( - f"Config save failed after {max_retries} attempt(s) " - f"for fabric {self.fabric}: {last_error}" - ) - - def deploy_config(self) -> Dict[str, Any]: - """Deploy pending configuration to all switches in the fabric. - - The ``configDeploy`` endpoint requires no request body; it deploys - all pending changes for the fabric. - - Returns: - API response dict. - - Raises: - SwitchOperationError: If the deploy request fails. - """ - return self._request_endpoint( - self.ep_config_deploy, action="Config deploy" - ) - - def get_fabric_info(self) -> Dict[str, Any]: - """Retrieve fabric information. - - Returns: - Fabric information dict. - - Raises: - SwitchOperationError: If the request fails. - """ - return self._request_endpoint( - self.ep_fabric_get, action="Get fabric info" - ) - - # ----------------------------------------------------------------- - # Internal helpers - # ----------------------------------------------------------------- - - def _request_endpoint( - self, endpoint, action: str = "Request" - ) -> Dict[str, Any]: - """Execute a request against a pre-configured endpoint. - - Args: - endpoint: Endpoint object with ``.path`` and ``.verb``. - action: Human-readable label for log messages. 
- - Returns: - API response dict. - - Raises: - SwitchOperationError: On any request failure. - """ - self.log.info(f"{action} for fabric: {self.fabric}") - try: - response = self.nd.request(endpoint.path, verb=endpoint.verb) - self.log.info( - f"{action} completed for fabric: {self.fabric}" - ) - return response - except Exception as e: - self.log.error( - f"{action} failed for fabric {self.fabric}: {e}" - ) - raise SwitchOperationError( - f"{action} failed for fabric {self.fabric}: {e}" - ) from e - - -__all__ = [ - "FabricUtils", -] diff --git a/plugins/module_utils/utils/manage_switches/payload_utils.py b/plugins/module_utils/utils/manage_switches/payload_utils.py deleted file mode 100644 index 84e99b99..00000000 --- a/plugins/module_utils/utils/manage_switches/payload_utils.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""API payload builders for ND Switch Resource operations.""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import logging -from copy import deepcopy -from typing import Any, Dict, List, Optional - - -def mask_password(payload: Dict[str, Any]) -> Dict[str, Any]: - """Return a deep copy of *payload* with password fields masked. - - Useful for safe logging of API payloads that contain credentials. - - Args: - payload: API payload dict (may contain ``password`` keys). - - Returns: - Copy with every ``password`` value replaced by ``"********"``. 
- """ - masked = deepcopy(payload) - if "password" in masked: - masked["password"] = "********" - if isinstance(masked.get("switches"), list): - for switch in masked["switches"]: - if isinstance(switch, dict) and "password" in switch: - switch["password"] = "********" - return masked - - -class PayloadUtils: - """Stateless helper for building ND Switch Resource API request payloads.""" - - def __init__(self, logger: Optional[logging.Logger] = None): - """Initialize PayloadUtils. - - Args: - logger: Optional logger; defaults to ``nd.PayloadUtils``. - """ - self.log = logger or logging.getLogger("nd.PayloadUtils") - - def build_credentials_payload( - self, - serial_numbers: List[str], - username: str, - password: str, - ) -> Dict[str, Any]: - """Build payload for saving switch credentials. - - Args: - serial_numbers: Switch serial numbers. - username: Switch username. - password: Switch password. - - Returns: - Credentials API payload dict. - """ - return { - "switchIds": serial_numbers, - "username": username, - "password": password, - } - - def build_switch_ids_payload( - self, - serial_numbers: List[str], - ) -> Dict[str, Any]: - """Build payload with switch IDs for remove / batch operations. - - Args: - serial_numbers: Switch serial numbers. - - Returns: - ``{"switchIds": [...]}`` payload dict. 
- """ - return {"switchIds": serial_numbers} - - -__all__ = [ - "mask_password", - "PayloadUtils", -] diff --git a/plugins/module_utils/utils/manage_switches/switch_helpers.py b/plugins/module_utils/utils/manage_switches/switch_helpers.py deleted file mode 100644 index 539309a7..00000000 --- a/plugins/module_utils/utils/manage_switches/switch_helpers.py +++ /dev/null @@ -1,138 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -"""Stateless utility helpers for switch field extraction, operation-type detection, and credential grouping.""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import logging -from typing import Any, Dict, List, Optional, Tuple, Union - - -def get_switch_field( - switch, - field_names: List[str], -) -> Optional[Any]: - """Extract a field value from a switch config, trying multiple names. - - Supports Pydantic models and plain dicts with both snake_case and - camelCase key lookups. - - Args: - switch: Switch model or dict to extract from. - field_names: Candidate field names to try, in priority order. - - Returns: - First non-``None`` value found, or ``None``. - """ - for name in field_names: - if hasattr(switch, name): - value = getattr(switch, name) - if value is not None: - return value - elif isinstance(switch, dict): - if name in switch and switch[name] is not None: - return switch[name] - # Try camelCase variant - camel = ''.join( - word.capitalize() if i > 0 else word - for i, word in enumerate(name.split('_')) - ) - if camel in switch and switch[camel] is not None: - return switch[camel] - return None - - -def determine_operation_type(switch) -> str: - """Determine the operation type from switch configuration. - - Args: - switch: A ``SwitchConfigModel``, ``SwitchDiscoveryModel``, - or raw dict. - - Returns: - ``'normal'``, ``'poap'``, or ``'rma'``. 
- """ - # Pydantic model with .operation_type attribute - if hasattr(switch, 'operation_type'): - return switch.operation_type - - if isinstance(switch, dict): - if 'poap' in switch or 'bootstrap' in switch: - return 'poap' - if ( - 'rma' in switch - or 'old_serial' in switch - or 'oldSerial' in switch - ): - return 'rma' - - return 'normal' - - -def group_switches_by_credentials( - switches, - log: logging.Logger, -) -> Dict[Tuple, list]: - """Group switches by shared credentials for bulk API operations. - - Args: - switches: Validated ``SwitchConfigModel`` instances. - log: Logger. - - Returns: - Dict mapping a ``(username, password_hash, auth_proto, - platform_type, preserve_config)`` tuple to the list of switches - sharing those credentials. - """ - groups: Dict[Tuple, list] = {} - - for switch in switches: - password_hash = hash(switch.password) - group_key = ( - switch.username, - password_hash, - switch.auth_proto, - switch.platform_type, - switch.preserve_config, - ) - groups.setdefault(group_key, []).append(switch) - - log.info( - f"Grouped {len(switches)} switches into " - f"{len(groups)} credential group(s)" - ) - - for idx, (key, group_switches) in enumerate(groups.items(), 1): - username, _, auth_proto, platform_type, preserve_config = key - auth_value = ( - auth_proto.value - if hasattr(auth_proto, 'value') - else str(auth_proto) - ) - platform_value = ( - platform_type.value - if hasattr(platform_type, 'value') - else str(platform_type) - ) - log.debug( - f"Group {idx}: {len(group_switches)} switches with " - f"username={username}, auth={auth_value}, " - f"platform={platform_value}, " - f"preserve_config={preserve_config}" - ) - - return groups - - -__all__ = [ - "get_switch_field", - "determine_operation_type", - "group_switches_by_credentials", -] diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 1019ff05..3942f93e 100644 --- a/plugins/modules/nd_manage_switches.py +++ 
b/plugins/modules/nd_manage_switches.py @@ -429,7 +429,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import SwitchConfigModel -from ansible_collections.cisco.nd.plugins.module_utils.nd_switch_resources import NDSwitchResourceModule +from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.nd_switch_resources import NDSwitchResourceModule from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import ( NDModule, NDModuleError, From d87293738095e2f992da58e41fb9d0714243a9c5 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 27 Mar 2026 01:14:35 +0530 Subject: [PATCH 073/109] Documentation updates --- plugins/modules/nd_manage_switches.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 3942f93e..9bf88c5e 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -209,8 +209,8 @@ rma: description: - RMA an existing switch with a new one. - - Please note that the existing switch should be configured and deployed in maintenance mode. - - Please note that the existing switch being replaced should be shutdown state or out of network. + - Please note that the existing switch being replaced should be configured, deployed in maintenance mode + and then shutdown (unreachable state). type: list elements: dict suboptions: @@ -271,7 +271,7 @@ - cisco.nd.modules - cisco.nd.check_mode notes: -- This module requires ND 12.x or higher. +- This module requires ND 4.2 or higher. - POAP operations require POAP and DHCP to be enabled in fabric settings. - RMA operations require the old switch to be in a replaceable state. 
- Idempotence for B(Bootstrap) - A bootstrap entry is considered idempotent when From 5621cb4f52dfceef3cd1e4653f8dd97eae80492f Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 27 Mar 2026 01:16:45 +0530 Subject: [PATCH 074/109] Doc update --- plugins/modules/nd_manage_switches.py | 46 +++++++++++++-------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 9bf88c5e..877ac868 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -158,6 +158,15 @@ - Serial number of the switch to Pre-provision. type: str required: true + discovery_username: + description: + - Username for device discovery during pre-provision. + type: str + discovery_password: + description: + - Password for device discovery during pre-provision. + type: str + no_log: true model: description: - Model of the switch to Pre-provision (e.g., N9K-C93180YC-EX). @@ -173,6 +182,10 @@ - Hostname for the switch during pre-provision. type: str required: true + image_policy: + description: + - Image policy to apply during pre-provision. + type: str config_data: description: - Basic configuration data for the switch during Pre-provision. @@ -193,19 +206,6 @@ - Gateway IP with subnet mask (e.g., 192.168.0.1/24). type: str required: true - discovery_username: - description: - - Username for device discovery during pre-provision. - type: str - discovery_password: - description: - - Password for device discovery during pre-provision. - type: str - no_log: true - image_policy: - description: - - Image policy to apply during pre-provision. - type: str rma: description: - RMA an existing switch with a new one. @@ -214,14 +214,6 @@ type: list elements: dict suboptions: - discovery_username: - description: - - Username for device discovery during POAP and RMA discovery. 
- type: str - discovery_password: - description: - - Password for device discovery during POAP and RMA discovery. - type: str new_serial_number: description: - Serial number of switch to Bootstrap for RMA. @@ -232,6 +224,15 @@ - Serial number of switch to be replaced by RMA. type: str required: true + discovery_username: + description: + - Username for device discovery during POAP and RMA discovery. + type: str + discovery_password: + description: + - Password for device discovery during POAP and RMA discovery. + type: str + no_log: true model: description: - Model of switch to Bootstrap for RMA. @@ -263,9 +264,6 @@ - Gateway IP with subnet mask (e.g., 192.168.0.1/24). type: str required: true - - Serial number of new replacement switch. - type: str - required: true extends_documentation_fragment: - cisco.nd.modules From 4f0773c6153edc82a30f09474f2efda1b823c148 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 30 Mar 2026 11:25:30 +0530 Subject: [PATCH 075/109] Black Formatting Changes --- plugins/module_utils/endpoints/mixins.py | 52 ++- .../v1/manage/manage_credentials_switches.py | 7 +- .../endpoints/v1/manage/manage_fabrics.py | 19 +- .../v1/manage/manage_fabrics_actions.py | 8 +- .../v1/manage/manage_fabrics_bootstrap.py | 11 +- .../v1/manage/manage_fabrics_inventory.py | 4 +- .../v1/manage/manage_fabrics_switchactions.py | 41 +- .../v1/manage/manage_fabrics_switches.py | 55 ++- .../manage_switches/nd_switch_resources.py | 350 ++++++++++-------- plugins/module_utils/manage_switches/utils.py | 233 ++++-------- .../models/manage_switches/__init__.py | 5 +- .../manage_switches/bootstrap_models.py | 202 ++++------ .../models/manage_switches/config_models.py | 187 ++++++---- .../manage_switches/discovery_models.py | 133 +++---- .../models/manage_switches/enums.py | 44 ++- .../manage_switches/preprovision_models.py | 16 +- .../models/manage_switches/rma_models.py | 98 ++--- .../manage_switches/switch_actions_models.py | 49 +-- .../manage_switches/switch_data_models.py 
| 253 +++++-------- .../models/manage_switches/validators.py | 10 +- plugins/modules/nd_manage_switches.py | 16 +- 21 files changed, 870 insertions(+), 923 deletions(-) diff --git a/plugins/module_utils/endpoints/mixins.py b/plugins/module_utils/endpoints/mixins.py index 9cd60fff..df33d6d3 100644 --- a/plugins/module_utils/endpoints/mixins.py +++ b/plugins/module_utils/endpoints/mixins.py @@ -23,43 +23,57 @@ class ClusterNameMixin(BaseModel): """Mixin for endpoints that require cluster_name parameter.""" - cluster_name: Optional[str] = Field(default=None, min_length=1, description="Cluster name") + cluster_name: Optional[str] = Field( + default=None, min_length=1, description="Cluster name" + ) class FabricNameMixin(BaseModel): """Mixin for endpoints that require fabric_name parameter.""" - fabric_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="Fabric name") + fabric_name: Optional[str] = Field( + default=None, min_length=1, max_length=64, description="Fabric name" + ) class FilterMixin(BaseModel): """Mixin for endpoints that require a Lucene filter expression.""" - filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") + filter: Optional[str] = Field( + default=None, min_length=1, description="Lucene filter expression" + ) class ForceShowRunMixin(BaseModel): """Mixin for endpoints that require force_show_run parameter.""" - force_show_run: BooleanStringEnum = Field(default=BooleanStringEnum.FALSE, description="Force show running config") + force_show_run: BooleanStringEnum = Field( + default=BooleanStringEnum.FALSE, description="Force show running config" + ) class HealthCategoryMixin(BaseModel): """Mixin for endpoints that require health_category parameter.""" - health_category: Optional[str] = Field(default=None, min_length=1, description="Health category") + health_category: Optional[str] = Field( + default=None, min_length=1, description="Health category" + ) class 
InclAllMsdSwitchesMixin(BaseModel): """Mixin for endpoints that require incl_all_msd_switches parameter.""" - incl_all_msd_switches: BooleanStringEnum = Field(default=BooleanStringEnum.FALSE, description="Include all MSD switches") + incl_all_msd_switches: BooleanStringEnum = Field( + default=BooleanStringEnum.FALSE, description="Include all MSD switches" + ) class LinkUuidMixin(BaseModel): """Mixin for endpoints that require link_uuid parameter.""" - link_uuid: Optional[str] = Field(default=None, min_length=1, description="Link UUID") + link_uuid: Optional[str] = Field( + default=None, min_length=1, description="Link UUID" + ) class LoginIdMixin(BaseModel): @@ -71,19 +85,25 @@ class LoginIdMixin(BaseModel): class MaxMixin(BaseModel): """Mixin for endpoints that require a max results parameter.""" - max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") + max: Optional[int] = Field( + default=None, ge=1, description="Maximum number of results" + ) class NetworkNameMixin(BaseModel): """Mixin for endpoints that require network_name parameter.""" - network_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="Network name") + network_name: Optional[str] = Field( + default=None, min_length=1, max_length=64, description="Network name" + ) class NodeNameMixin(BaseModel): """Mixin for endpoints that require node_name parameter.""" - node_name: Optional[str] = Field(default=None, min_length=1, description="Node name") + node_name: Optional[str] = Field( + default=None, min_length=1, description="Node name" + ) class OffsetMixin(BaseModel): @@ -95,16 +115,22 @@ class OffsetMixin(BaseModel): class SwitchSerialNumberMixin(BaseModel): """Mixin for endpoints that require switch_sn parameter.""" - switch_sn: Optional[str] = Field(default=None, min_length=1, description="Switch serial number") + switch_sn: Optional[str] = Field( + default=None, min_length=1, description="Switch serial number" + ) class 
TicketIdMixin(BaseModel): """Mixin for endpoints that require ticket_id parameter.""" - ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + ticket_id: Optional[str] = Field( + default=None, min_length=1, description="Change control ticket ID" + ) class VrfNameMixin(BaseModel): """Mixin for endpoints that require vrf_name parameter.""" - vrf_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="VRF name") + vrf_name: Optional[str] = Field( + default=None, min_length=1, max_length=64, description="VRF name" + ) diff --git a/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py b/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py index 9609dc99..17ac0312 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py @@ -118,10 +118,13 @@ class EpManageCredentialsSwitchesPost(_EpManageCredentialsSwitchesBase): """ class_name: Literal["EpManageCredentialsSwitchesPost"] = Field( - default="EpManageCredentialsSwitchesPost", frozen=True, description="Class name for backward compatibility" + default="EpManageCredentialsSwitchesPost", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: CredentialsSwitchesEndpointParams = Field( - default_factory=CredentialsSwitchesEndpointParams, description="Endpoint-specific query parameters" + default_factory=CredentialsSwitchesEndpointParams, + description="Endpoint-specific query parameters", ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py index 6541dccc..9093a672 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py @@ -61,8 +61,12 @@ class FabricConfigDeployEndpointParams(EndpointQueryParams): ``` """ - 
force_show_run: Optional[bool] = Field(default=None, description="Force show running config before deploy") - incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") + force_show_run: Optional[bool] = Field( + default=None, description="Force show running config before deploy" + ) + incl_all_msd_switches: Optional[bool] = Field( + default=None, description="Include all MSD fabric switches" + ) class _EpManageFabricsBase(FabricNameMixin, NDEndpointBaseModel): @@ -125,10 +129,13 @@ class EpManageFabricConfigDeployPost(_EpManageFabricsBase): """ class_name: Literal["EpManageFabricConfigDeployPost"] = Field( - default="EpManageFabricConfigDeployPost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricConfigDeployPost", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: FabricConfigDeployEndpointParams = Field( - default_factory=FabricConfigDeployEndpointParams, description="Endpoint-specific query parameters" + default_factory=FabricConfigDeployEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -183,7 +190,9 @@ class EpManageFabricGet(_EpManageFabricsBase): """ class_name: Literal["EpManageFabricGet"] = Field( - default="EpManageFabricGet", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricGet", + frozen=True, + description="Class name for backward compatibility", ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py index 5c2a72bb..4019af51 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py @@ -82,7 +82,9 @@ class EpManageFabricsActionsShallowDiscoveryPost(_EpManageFabricsActionsBase): """ class_name: Literal["EpManageFabricsActionsShallowDiscoveryPost"] = Field( - 
default="EpManageFabricsActionsShallowDiscoveryPost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsActionsShallowDiscoveryPost", + frozen=True, + description="Class name for backward compatibility", ) @property @@ -125,7 +127,9 @@ class EpManageFabricsActionsConfigSavePost(_EpManageFabricsActionsBase): """ class_name: Literal["EpManageFabricsActionsConfigSavePost"] = Field( - default="EpManageFabricsActionsConfigSavePost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsActionsConfigSavePost", + frozen=True, + description="Class name for backward compatibility", ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py index 89dcb6a8..d7231abd 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py @@ -43,7 +43,9 @@ ) -class FabricsBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): +class FabricsBootstrapEndpointParams( + FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams +): """ # Summary @@ -127,10 +129,13 @@ class EpManageFabricsBootstrapGet(_EpManageFabricsBootstrapBase): """ class_name: Literal["EpManageFabricsBootstrapGet"] = Field( - default="EpManageFabricsBootstrapGet", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsBootstrapGet", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: FabricsBootstrapEndpointParams = Field( - default_factory=FabricsBootstrapEndpointParams, description="Endpoint-specific query parameters" + default_factory=FabricsBootstrapEndpointParams, + description="Endpoint-specific query parameters", ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py 
b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py index 5cad5a42..b4dd0247 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py @@ -82,7 +82,9 @@ class EpManageFabricsInventoryDiscoverGet(_EpManageFabricsInventoryBase): """ class_name: Literal["EpManageFabricsInventoryDiscoverGet"] = Field( - default="EpManageFabricsInventoryDiscoverGet", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsInventoryDiscoverGet", + frozen=True, + description="Class name for backward compatibility", ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py index 7613140d..73c7b148 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py @@ -45,7 +45,6 @@ NDEndpointBaseModel, ) - # ============================================================================ # Endpoint-specific query parameter classes # ============================================================================ @@ -94,7 +93,9 @@ class SwitchActionsTicketEndpointParams(TicketIdMixin, EndpointQueryParams): """ -class SwitchActionsImportEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): +class SwitchActionsImportEndpointParams( + ClusterNameMixin, TicketIdMixin, EndpointQueryParams +): """ # Summary @@ -181,10 +182,13 @@ class EpManageFabricsSwitchActionsRemovePost(_EpManageFabricsSwitchActionsBase): """ class_name: Literal["EpManageFabricsSwitchActionsRemovePost"] = Field( - default="EpManageFabricsSwitchActionsRemovePost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsSwitchActionsRemovePost", + frozen=True, + description="Class name for backward compatibility", ) 
endpoint_params: SwitchActionsRemoveEndpointParams = Field( - default_factory=SwitchActionsRemoveEndpointParams, description="Endpoint-specific query parameters" + default_factory=SwitchActionsRemoveEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -253,11 +257,13 @@ class EpManageFabricsSwitchActionsChangeRolesPost(_EpManageFabricsSwitchActionsB """ class_name: Literal["EpManageFabricsSwitchActionsChangeRolesPost"] = Field( - default="EpManageFabricsSwitchActionsChangeRolesPost", frozen=True, + default="EpManageFabricsSwitchActionsChangeRolesPost", + frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( - default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -283,7 +289,9 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class EpManageFabricsSwitchActionsImportBootstrapPost(_EpManageFabricsSwitchActionsBase): +class EpManageFabricsSwitchActionsImportBootstrapPost( + _EpManageFabricsSwitchActionsBase +): """ # Summary @@ -328,10 +336,13 @@ class EpManageFabricsSwitchActionsImportBootstrapPost(_EpManageFabricsSwitchActi """ class_name: Literal["EpManageFabricsSwitchActionsImportBootstrapPost"] = Field( - default="EpManageFabricsSwitchActionsImportBootstrapPost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsSwitchActionsImportBootstrapPost", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: SwitchActionsImportEndpointParams = Field( - default_factory=SwitchActionsImportEndpointParams, description="Endpoint-specific query parameters" + default_factory=SwitchActionsImportEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -410,11 +421,13 @@ class 
EpManageFabricsSwitchActionsPreProvisionPost(_EpManageFabricsSwitchActions """ class_name: Literal["EpManageFabricsSwitchActionsPreProvisionPost"] = Field( - default="EpManageFabricsSwitchActionsPreProvisionPost", frozen=True, + default="EpManageFabricsSwitchActionsPreProvisionPost", + frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsImportEndpointParams = Field( - default_factory=SwitchActionsImportEndpointParams, description="Endpoint-specific query parameters" + default_factory=SwitchActionsImportEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -488,11 +501,13 @@ class EpManageFabricsSwitchActionsRediscoverPost(_EpManageFabricsSwitchActionsBa """ class_name: Literal["EpManageFabricsSwitchActionsRediscoverPost"] = Field( - default="EpManageFabricsSwitchActionsRediscoverPost", frozen=True, + default="EpManageFabricsSwitchActionsRediscoverPost", + frozen=True, description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( - default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", ) @property diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py index 485747ec..d485cd09 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py @@ -46,7 +46,10 @@ NDEndpointBaseModel, ) -class FabricSwitchesGetEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): + +class FabricSwitchesGetEndpointParams( + FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams +): """ # Summary @@ -68,10 +71,14 @@ class FabricSwitchesGetEndpointParams(FilterMixin, MaxMixin, OffsetMixin, Endpoi ``` """ - hostname: 
Optional[str] = Field(default=None, min_length=1, description="Filter by switch hostname") + hostname: Optional[str] = Field( + default=None, min_length=1, description="Filter by switch hostname" + ) -class FabricSwitchesAddEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): +class FabricSwitchesAddEndpointParams( + ClusterNameMixin, TicketIdMixin, EndpointQueryParams +): """ # Summary @@ -155,10 +162,13 @@ class EpManageFabricsSwitchesGet(_EpManageFabricsSwitchesBase): """ class_name: Literal["EpManageFabricsSwitchesGet"] = Field( - default="EpManageFabricsSwitchesGet", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsSwitchesGet", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: FabricSwitchesGetEndpointParams = Field( - default_factory=FabricSwitchesGetEndpointParams, description="Endpoint-specific query parameters" + default_factory=FabricSwitchesGetEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -228,10 +238,13 @@ class EpManageFabricsSwitchesPost(_EpManageFabricsSwitchesBase): """ class_name: Literal["EpManageFabricsSwitchesPost"] = Field( - default="EpManageFabricsSwitchesPost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsSwitchesPost", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: FabricSwitchesAddEndpointParams = Field( - default_factory=FabricSwitchesAddEndpointParams, description="Endpoint-specific query parameters" + default_factory=FabricSwitchesAddEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -260,6 +273,7 @@ def verb(self) -> HttpVerbEnum: # Per-Switch Action Endpoints # ============================================================================ + class SwitchActionsTicketEndpointParams(TicketIdMixin, EndpointQueryParams): """ # Summary @@ -299,7 +313,10 @@ class 
SwitchActionsClusterEndpointParams(ClusterNameMixin, EndpointQueryParams): ``` """ -class _EpManageFabricsSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): + +class _EpManageFabricsSwitchActionsPerSwitchBase( + FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel +): """ Base class for per-switch action endpoints. @@ -314,7 +331,9 @@ def _base_path(self) -> str: raise ValueError("fabric_name must be set before accessing path") if self.switch_sn is None: raise ValueError("switch_sn must be set before accessing path") - return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn, "actions") + return BasePath.path( + "fabrics", self.fabric_name, "switches", self.switch_sn, "actions" + ) class EpManageFabricsSwitchProvisionRMAPost(_EpManageFabricsSwitchActionsPerSwitchBase): @@ -362,10 +381,13 @@ class EpManageFabricsSwitchProvisionRMAPost(_EpManageFabricsSwitchActionsPerSwit """ class_name: Literal["EpManageFabricsSwitchProvisionRMAPost"] = Field( - default="EpManageFabricsSwitchProvisionRMAPost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsSwitchProvisionRMAPost", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: SwitchActionsTicketEndpointParams = Field( - default_factory=SwitchActionsTicketEndpointParams, description="Endpoint-specific query parameters" + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", ) @property @@ -383,7 +405,9 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class EpManageFabricsSwitchChangeSerialNumberPost(_EpManageFabricsSwitchActionsPerSwitchBase): +class EpManageFabricsSwitchChangeSerialNumberPost( + _EpManageFabricsSwitchActionsPerSwitchBase +): """ # Summary @@ -428,10 +452,13 @@ class EpManageFabricsSwitchChangeSerialNumberPost(_EpManageFabricsSwitchActionsP """ class_name: 
Literal["EpManageFabricsSwitchChangeSerialNumberPost"] = Field( - default="EpManageFabricsSwitchChangeSerialNumberPost", frozen=True, description="Class name for backward compatibility" + default="EpManageFabricsSwitchChangeSerialNumberPost", + frozen=True, + description="Class name for backward compatibility", ) endpoint_params: SwitchActionsClusterEndpointParams = Field( - default_factory=SwitchActionsClusterEndpointParams, description="Endpoint-specific query parameters" + default_factory=SwitchActionsClusterEndpointParams, + description="Endpoint-specific query parameters", ) @property diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 6b9a1b99..cd067828 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -23,7 +23,9 @@ from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType -from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import NDConfigCollection +from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import ( + NDConfigCollection, +) from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches import ( @@ -81,7 +83,6 @@ EpManageCredentialsSwitchesPost, ) - # ========================================================================= # Constants & Globals # ========================================================================= @@ -90,7 +91,6 @@ _DISCOVERY_MAX_HOPS: int = 0 - @dataclass class SwitchServiceContext: """Store shared dependencies used by service classes. 
@@ -103,6 +103,7 @@ class SwitchServiceContext: save_config: Whether to run fabric save after changes. deploy_config: Whether to run fabric deploy after changes. """ + nd: NDModule results: Results fabric: str @@ -115,6 +116,7 @@ class SwitchServiceContext: # Validation & Diff # ========================================================================= + class SwitchDiffEngine: """Provide stateless validation and diff computation helpers.""" @@ -152,13 +154,13 @@ def validate_configs( ) validated_configs.append(validated) except ValidationError as e: - error_detail = e.errors() if hasattr(e, 'errors') else str(e) + error_detail = e.errors() if hasattr(e, "errors") else str(e) error_msg = ( f"Configuration validation failed for " f"config index {idx}: {error_detail}" ) log.error(error_msg) - if hasattr(nd, 'module'): + if hasattr(nd, "module"): nd.module.fail_json(msg=error_msg) else: raise ValueError(error_msg) from e @@ -168,7 +170,7 @@ def validate_configs( f"config index {idx}: {str(e)}" ) log.error(error_msg) - if hasattr(nd, 'module'): + if hasattr(nd, "module"): nd.module.fail_json(msg=error_msg) else: raise ValueError(error_msg) from e @@ -190,7 +192,7 @@ def validate_configs( f"{sorted(duplicate_ips)}. Each switch must appear only once." 
) log.error(error_msg) - if hasattr(nd, 'module'): + if hasattr(nd, "module"): nd.module.fail_json(msg=error_msg) else: raise ValueError(error_msg) @@ -279,16 +281,16 @@ def compute_changes( changes["to_add"].append(prop_sw) continue - log.debug(f"Switch {ip} (id={sid}) found in existing with {match_key} match {existing_sw}") + log.debug( + f"Switch {ip} (id={sid}) found in existing with {match_key} match {existing_sw}" + ) log.debug( f"Switch {ip} matched existing by {match_key} " f"(existing_id={existing_sw.switch_id})" ) if existing_sw.additional_data.system_mode == SystemMode.MIGRATION: - log.info( - f"Switch {ip} ({existing_sw.switch_id}) is in Migration mode" - ) + log.info(f"Switch {ip} ({existing_sw.switch_id}) is in Migration mode") changes["migration_mode"].append(prop_sw) continue @@ -303,7 +305,11 @@ def compute_changes( log.debug(f"Switch {ip} is idempotent — no changes needed") changes["idempotent"].append(prop_sw) else: - diff_keys = {k for k in set(prop_dict) | set(existing_dict) if prop_dict.get(k) != existing_dict.get(k)} + diff_keys = { + k + for k in set(prop_dict) | set(existing_dict) + if prop_dict.get(k) != existing_dict.get(k) + } log.info( f"Switch {ip} has differences — marking to_update. 
" f"Changed fields: {diff_keys}" @@ -385,9 +391,8 @@ def validate_switch_api_fields( ) if config_data is not None: - bs_gateway = ( - bootstrap_data.get("gatewayIpMask") - or bs_data.get("gatewayIpMask") + bs_gateway = bootstrap_data.get("gatewayIpMask") or bs_data.get( + "gatewayIpMask" ) if config_data.gateway is not None and config_data.gateway != bs_gateway: mismatches.append( @@ -396,10 +401,7 @@ def validate_switch_api_fields( ) bs_models = bs_data.get("models", []) - if ( - config_data.models - and sorted(config_data.models) != sorted(bs_models) - ): + if config_data.models and sorted(config_data.models) != sorted(bs_models): mismatches.append( f"config_data.models: provided {config_data.models}, " f"bootstrap reports {bs_models}" @@ -410,8 +412,7 @@ def validate_switch_api_fields( msg=( f"{context} field mismatch for serial '{serial}'. " f"The following provided values do not match the " - f"bootstrap API data:\n" - + "\n".join(f" - {m}" for m in mismatches) + f"bootstrap API data:\n" + "\n".join(f" - {m}" for m in mismatches) ) ) @@ -432,15 +433,14 @@ def validate_switch_api_fields( f"{', '.join(pulled)}" ) else: - log.debug( - f"{context} field validation passed for serial '{serial}'" - ) + log.debug(f"{context} field validation passed for serial '{serial}'") # ========================================================================= # Switch Discovery Service # ========================================================================= + class SwitchDiscoveryService: """Handle switch discovery and proposed-model construction.""" @@ -625,16 +625,11 @@ def bulk_discover( f"{len(discovered_results)}/{len(seed_ips)} switches successful" ) log.debug(f"Discovered switches: {list(discovered_results.keys())}") - log.debug( - f"EXIT: bulk_discover() -> {len(discovered_results)} discovered" - ) + log.debug(f"EXIT: bulk_discover() -> {len(discovered_results)} discovered") return discovered_results except Exception as e: - msg = ( - f"Bulk discovery failed for 
switches " - f"{', '.join(seed_ips)}: {e}" - ) + msg = f"Bulk discovery failed for switches " f"{', '.join(seed_ips)}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -664,9 +659,7 @@ def build_proposed( if discovered: if cfg.role is not None: discovered = {**discovered, "role": cfg.role} - proposed.append( - SwitchDataModel.from_response(discovered) - ) + proposed.append(SwitchDataModel.from_response(discovered)) log.debug(f"Built proposed model from discovery for {seed_ip}") continue @@ -678,7 +671,9 @@ def build_proposed( if existing_match: if cfg.role is not None: data = existing_match.model_dump(by_alias=True) - data["switchRole"] = cfg.role.value if isinstance(cfg.role, SwitchRole) else cfg.role + data["switchRole"] = ( + cfg.role.value if isinstance(cfg.role, SwitchRole) else cfg.role + ) proposed.append(SwitchDataModel.model_validate(data)) else: proposed.append(existing_match) @@ -702,6 +697,7 @@ def build_proposed( # Bulk Fabric Operations # ========================================================================= + class SwitchFabricOps: """Run fabric mutation operations for add, delete, credentials, and roles.""" @@ -763,7 +759,7 @@ def bulk_add( log.error(msg) nd.module.fail_json(msg=msg) - switch_role = switch_config.role if hasattr(switch_config, 'role') else None + switch_role = switch_config.role if hasattr(switch_config, "role") else None switch_discovery = SwitchDiscoveryModel( hostname=discovered.get("hostname"), @@ -781,7 +777,9 @@ def bulk_add( if not switch_discoveries: log.error("No valid switches to add after validation") - raise SwitchOperationError("No valid switches to add - all failed validation") + raise SwitchOperationError( + "No valid switches to add - all failed validation" + ) add_request = AddSwitchesRequestModel( switches=switch_discoveries, @@ -859,15 +857,17 @@ def bulk_delete( serial_numbers: List[str] = [] for switch in switches: sn = None - if hasattr(switch, 'switch_id'): + if hasattr(switch, "switch_id"): sn = 
switch.switch_id - elif hasattr(switch, 'serial_number'): + elif hasattr(switch, "serial_number"): sn = switch.serial_number if sn: serial_numbers.append(sn) else: - ip = getattr(switch, 'fabric_management_ip', None) or getattr(switch, 'ip', None) + ip = getattr(switch, "fabric_management_ip", None) or getattr( + switch, "ip", None + ) log.warning(f"Cannot delete switch {ip}: no serial number/switch_id") if not serial_numbers: @@ -930,7 +930,9 @@ def bulk_save_credentials( cred_groups: Dict[Tuple[str, str], List[str]] = {} for sn, cfg in switch_actions: if not cfg.username or not cfg.password: - log.debug(f"Skipping credentials for {sn}: missing username or password") + log.debug( + f"Skipping credentials for {sn}: missing username or password" + ) continue key = (cfg.username, cfg.password) cred_groups.setdefault(key, []).append(sn) @@ -953,9 +955,7 @@ def bulk_save_credentials( f"Saving credentials for {len(serial_numbers)} switch(es): {serial_numbers}" ) log.debug(f"Credentials endpoint: {endpoint.path}") - log.debug( - f"Credentials payload (masked): {mask_password(payload)}" - ) + log.debug(f"Credentials payload (masked): {mask_password(payload)}") try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -975,8 +975,7 @@ def bulk_save_credentials( log.info(f"Credentials saved for {len(serial_numbers)} switch(es)") except Exception as e: msg = ( - f"Failed to save credentials for " - f"switches {serial_numbers}: {e}" + f"Failed to save credentials for " f"switches {serial_numbers}: {e}" ) log.error(msg) nd.module.fail_json(msg=msg) @@ -1003,7 +1002,7 @@ def bulk_update_roles( switch_roles = [] for sn, cfg in switch_actions: - role = get_switch_field(cfg, ['role']) + role = get_switch_field(cfg, ["role"]) if not role: continue role_value = role.value if isinstance(role, SwitchRole) else str(role) @@ -1035,9 +1034,7 @@ def bulk_update_roles( results.register_api_call() log.info(f"Roles updated for {len(switch_roles)} switch(es)") except Exception 
as e: - msg = ( - f"Failed to bulk update roles for switches: {e}" - ) + msg = f"Failed to bulk update roles for switches: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1133,6 +1130,7 @@ def post_add_processing( # POAP Handler (Bootstrap / Pre-Provision) # ========================================================================= + class POAPHandler: """Handle POAP workflows for bootstrap, pre-provision, and serial swap.""" @@ -1180,7 +1178,9 @@ def handle( # Classify entries first so check mode can report per-operation counts bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] preprov_entries: List[Tuple[SwitchConfigModel, PreprovisionConfigModel]] = [] - swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel]] = [] + swap_entries: List[ + Tuple[SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel] + ] = [] for switch_cfg in proposed_config: has_poap = bool(switch_cfg.poap) @@ -1189,12 +1189,26 @@ def handle( if has_poap and has_preprov: # Swap: only serial_number is meaningful on each side; warn about extras poap_extra = [ - f for f in ["hostname", "image_policy", "discovery_username", "discovery_password"] + f + for f in [ + "hostname", + "image_policy", + "discovery_username", + "discovery_password", + ] if getattr(switch_cfg.poap, f, None) ] preprov_extra = [ - f for f in ["model", "version", "hostname", "config_data", - "image_policy", "discovery_username", "discovery_password"] + f + for f in [ + "model", + "version", + "hostname", + "config_data", + "image_policy", + "discovery_username", + "discovery_password", + ] if getattr(switch_cfg.preprovision, f, None) ] if poap_extra: @@ -1207,7 +1221,9 @@ def handle( f"Swap ({switch_cfg.seed_ip}): extra fields in 'preprovision' will be " f"ignored during swap: {preprov_extra}" ) - swap_entries.append((switch_cfg, switch_cfg.poap, switch_cfg.preprovision)) + swap_entries.append( + (switch_cfg, switch_cfg.poap, switch_cfg.preprovision) + ) elif 
has_preprov: preprov_entries.append((switch_cfg, switch_cfg.preprovision)) elif has_poap: @@ -1247,9 +1263,7 @@ def handle( # Bootstrap: idempotent when both IP address AND serial number match. # PreProvision: idempotent when IP address alone matches. existing_by_ip = { - sw.fabric_management_ip: sw - for sw in existing - if sw.fabric_management_ip + sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip } active_bootstrap = [] @@ -1424,10 +1438,7 @@ def _build_bootstrap_import_model( model = bs.get("model", "") version = bs.get("softwareVersion", "") - gateway_ip_mask = ( - bs.get("gatewayIpMask") - or bs_data.get("gatewayIpMask") - ) + gateway_ip_mask = bs.get("gatewayIpMask") or bs_data.get("gatewayIpMask") data_models = bs_data.get("models", []) # Hostname: user-provided via poap.hostname is the default; if the @@ -1523,9 +1534,7 @@ def _import_bootstrap_switches( payload = request_model.to_payload() log.debug(f"importBootstrap endpoint: {endpoint.path}") - log.debug( - f"importBootstrap payload (masked): {mask_password(payload)}" - ) + log.debug(f"importBootstrap payload (masked): {mask_password(payload)}") log.info( f"Importing {len(models)} bootstrap switch(es): " f"{[m.serial_number for m in models]}" @@ -1614,9 +1623,7 @@ def _build_preprovision_model( switchRole=switch_role, ) - log.debug( - f"EXIT: _build_preprovision_model() -> {preprov_model.serial_number}" - ) + log.debug(f"EXIT: _build_preprovision_model() -> {preprov_model.serial_number}") return preprov_model def _preprovision_switches( @@ -1644,9 +1651,7 @@ def _preprovision_switches( payload = request_model.to_payload() log.debug(f"preProvision endpoint: {endpoint.path}") - log.debug( - f"preProvision payload (masked): {mask_password(payload)}" - ) + log.debug(f"preProvision payload (masked): {mask_password(payload)}") log.info( f"Pre-provisioning {len(models)} switch(es): " f"{[m.serial_number for m in models]}" @@ -1685,7 +1690,9 @@ def _preprovision_switches( def 
_handle_poap_swap( self, - swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel, "PreprovisionConfigModel"]], + swap_entries: List[ + Tuple[SwitchConfigModel, POAPConfigModel, "PreprovisionConfigModel"] + ], existing: List[SwitchDataModel], ) -> None: """Process POAP serial-swap entries. @@ -1757,8 +1764,7 @@ def _handle_poap_swap( log.error(msg) nd.module.fail_json(msg=msg) log.info( - f"Validated: new serial '{new_serial}' exists in " - f"bootstrap list" + f"Validated: new serial '{new_serial}' exists in " f"bootstrap list" ) # ------------------------------------------------------------------ @@ -1777,18 +1783,14 @@ def _handle_poap_swap( endpoint.fabric_name = fabric endpoint.switch_sn = old_serial - request_body = ChangeSwitchSerialNumberRequestModel( - newSwitchId=new_serial - ) + request_body = ChangeSwitchSerialNumberRequestModel(newSwitchId=new_serial) payload = request_body.to_payload() log.debug(f"changeSwitchSerialNumber endpoint: {endpoint.path}") log.debug(f"changeSwitchSerialNumber payload: {payload}") try: - nd.request( - path=endpoint.path, verb=endpoint.verb, data=payload - ) + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) except Exception as e: msg = ( f"changeSwitchSerialNumber API call failed for " @@ -1818,9 +1820,7 @@ def _handle_poap_swap( log.error(msg) nd.module.fail_json(msg=msg) - log.info( - f"Serial number swap successful: {old_serial} → {new_serial}" - ) + log.info(f"Serial number swap successful: {old_serial} → {new_serial}") # ------------------------------------------------------------------ # Step 4: Re-query bootstrap API for post-swap data @@ -1828,8 +1828,7 @@ def _handle_poap_swap( post_swap_bootstrap = query_bootstrap_switches(nd, fabric, log) post_swap_index = build_bootstrap_index(post_swap_bootstrap) log.debug( - f"Post-swap bootstrap list contains " - f"{len(post_swap_index)} switch(es)" + f"Post-swap bootstrap list contains " f"{len(post_swap_index)} switch(es)" ) # 
------------------------------------------------------------------ @@ -1866,9 +1865,7 @@ def _handle_poap_swap( try: self._import_bootstrap_switches(import_models) except Exception as e: - msg = ( - f"importBootstrap failed after serial swap: {e}" - ) + msg = f"importBootstrap failed after serial swap: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1897,6 +1894,7 @@ def _handle_poap_swap( # RMA Handler (Return Material Authorization) # ========================================================================= + class RMAHandler: """Handle RMA workflows for switch replacement.""" @@ -1990,7 +1988,9 @@ def handle( # Build and submit each RMA request switch_actions: List[Tuple[str, SwitchConfigModel]] = [] - rma_diff_data: List[Tuple[str, str, SwitchConfigModel]] = [] # (new_serial, old_serial, switch_cfg) + rma_diff_data: List[Tuple[str, str, SwitchConfigModel]] = ( + [] + ) # (new_serial, old_serial, switch_cfg) for switch_cfg, rma_cfg in rma_entries: new_serial = rma_cfg.new_serial_number bootstrap_data = bootstrap_idx.get(new_serial) @@ -2006,18 +2006,20 @@ def handle( nd.module.fail_json(msg=msg) SwitchDiffEngine.validate_switch_api_fields( - nd=nd, - serial=rma_cfg.new_serial_number, - model=rma_cfg.model, - version=rma_cfg.version, - config_data=rma_cfg.config_data, - bootstrap_data=bootstrap_data, - log=log, - context="RMA", - ) + nd=nd, + serial=rma_cfg.new_serial_number, + model=rma_cfg.model, + version=rma_cfg.version, + config_data=rma_cfg.config_data, + bootstrap_data=bootstrap_data, + log=log, + context="RMA", + ) rma_model = self._build_rma_model( - switch_cfg, rma_cfg, bootstrap_data, + switch_cfg, + rma_cfg, + bootstrap_data, old_switch_info[rma_cfg.old_serial_number], ) log.info( @@ -2027,7 +2029,9 @@ def handle( self._provision_rma_switch(rma_cfg.old_serial_number, rma_model) switch_actions.append((rma_model.new_switch_id, switch_cfg)) - rma_diff_data.append((rma_model.new_switch_id, rma_cfg.old_serial_number, switch_cfg)) + 
rma_diff_data.append( + (rma_model.new_switch_id, rma_cfg.old_serial_number, switch_cfg) + ) # Post-processing: wait for RMA switches to become ready, then # save credentials and finalize. RMA switches come up via POAP @@ -2211,9 +2215,8 @@ def _build_rma_model( or bs_data.get("gatewayIpMask") ) data_models = ( - (rma_cfg.config_data.models if rma_cfg.config_data else None) - or bs_data.get("models", []) - ) + rma_cfg.config_data.models if rma_cfg.config_data else None + ) or bs_data.get("models", []) rma_model = RMASwitchModel( gatewayIpMask=gateway_ip_mask, @@ -2230,12 +2233,14 @@ def _build_rma_model( newSwitchId=new_switch_id, publicKey=public_key, fingerPrint=finger_print, - data={"gatewayIpMask": gateway_ip_mask, "models": data_models} if (gateway_ip_mask or data_models) else None, + data=( + {"gatewayIpMask": gateway_ip_mask, "models": data_models} + if (gateway_ip_mask or data_models) + else None + ), ) - log.debug( - f"EXIT: _build_rma_model() -> newSwitchId={rma_model.new_switch_id}" - ) + log.debug(f"EXIT: _build_rma_model() -> newSwitchId={rma_model.new_switch_id}") return rma_model def _provision_rma_switch( @@ -2307,7 +2312,8 @@ def _provision_rma_switch( # Orchestrator (Thin State Router) # ========================================================================= -class NDSwitchResourceModule(): + +class NDSwitchResourceModule: """Orchestrate switch lifecycle management across supported states.""" # ===================================================================== @@ -2353,13 +2359,17 @@ def __init__( # Switch collections try: - self.proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) + self.proposed: NDConfigCollection = NDConfigCollection( + model_class=SwitchDataModel + ) self.existing: NDConfigCollection = NDConfigCollection.from_api_response( response_data=self._query_all_switches(), model_class=SwitchDataModel, ) self.before: NDConfigCollection = self.existing.copy() - self.sent: NDConfigCollection = 
NDConfigCollection(model_class=SwitchDataModel) + self.sent: NDConfigCollection = NDConfigCollection( + model_class=SwitchDataModel + ) except Exception as e: msg = ( f"Failed to query fabric '{self.fabric}' inventory " @@ -2370,7 +2380,9 @@ def __init__( # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] - self.output: NDOutput = NDOutput(output_level=self.module.params.get("output_level", "normal")) + self.output: NDOutput = NDOutput( + output_level=self.module.params.get("output_level", "normal") + ) self.output.assign(before=self.before, after=self.existing) # Utility instances (SwitchWaitUtils / FabricUtils depend on self) @@ -2405,11 +2417,11 @@ def exit_json(self) -> None: gathered = [] for sw in self.existing: try: - gathered.append(SwitchConfigModel.from_switch_data(sw).to_gathered_dict()) - except (ValueError, Exception) as exc: - msg = ( - f"Failed to convert switch {sw.switch_id!r} to gathered format: {exc}" + gathered.append( + SwitchConfigModel.from_switch_data(sw).to_gathered_dict() ) + except (ValueError, Exception) as exc: + msg = f"Failed to convert switch {sw.switch_id!r} to gathered format: {exc}" self.log.error(msg) self.nd.module.fail_json(msg=msg) self.output.assign(after=self.existing) @@ -2419,7 +2431,8 @@ def exit_json(self) -> None: # that "after" reflects real state rather than the pre-op snapshot. 
if True not in self.results.failed and not self.nd.module.check_mode: self.existing = NDConfigCollection.from_api_response( - response_data=self._query_all_switches(), model_class=SwitchDataModel + response_data=self._query_all_switches(), + model_class=SwitchDataModel, ) self.output.assign(after=self.existing, diff=self.sent) final.update(self.output.format()) @@ -2455,7 +2468,9 @@ def manage_state(self) -> None: # deleted — config is optional if self.state == "deleted": proposed_config = ( - SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) + SwitchDiffEngine.validate_configs( + self.config, self.state, self.nd, self.log + ) if self.config else None ) @@ -2471,11 +2486,17 @@ def manage_state(self) -> None: self.config, self.state, self.nd, self.log ) # Partition configs by operation type - poap_configs = [c for c in proposed_config if c.operation_type in ("poap", "preprovision", "swap")] + poap_configs = [ + c + for c in proposed_config + if c.operation_type in ("poap", "preprovision", "swap") + ] rma_configs = [c for c in proposed_config if c.operation_type == "rma"] normal_configs = [c for c in proposed_config if c.operation_type == "normal"] # Capture all proposed configs for NDOutput - output_proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchConfigModel) + output_proposed: NDConfigCollection = NDConfigCollection( + model_class=SwitchConfigModel + ) for cfg in proposed_config: output_proposed.add(cfg) self.output.assign(proposed=output_proposed) @@ -2495,7 +2516,9 @@ def manage_state(self) -> None: # before POAP/RMA handlers execute. 
if normal_configs: existing_ips = {sw.fabric_management_ip for sw in self.existing} - configs_to_discover = [cfg for cfg in normal_configs if cfg.seed_ip not in existing_ips] + configs_to_discover = [ + cfg for cfg in normal_configs if cfg.seed_ip not in existing_ips + ] if configs_to_discover: self.log.info( f"Discovery needed for {len(configs_to_discover)}/{len(normal_configs)} " @@ -2503,7 +2526,9 @@ def manage_state(self) -> None: ) discovered_data = self.discovery.discover(configs_to_discover) else: - self.log.info("All proposed switches already in fabric — skipping discovery") + self.log.info( + "All proposed switches already in fabric — skipping discovery" + ) discovered_data = {} built = self.discovery.build_proposed( normal_configs, discovered_data, list(self.existing) @@ -2584,11 +2609,16 @@ def _handle_merged_state( self.results.action = "merge" self.results.state = self.state self.results.operation_type = OperationType.CREATE - self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.response_current = { + "MESSAGE": "check mode — skipped", + "RETURN_CODE": 200, + } self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_add": [sw.fabric_management_ip for sw in switches_to_add], - "migration_mode": [sw.fabric_management_ip for sw in migration_switches], + "migration_mode": [ + sw.fabric_management_ip for sw in migration_switches + ], "save_deploy_required": idempotent_save_req, } self.results.register_api_call() @@ -2613,7 +2643,13 @@ def _handle_merged_state( if add_configs: credential_groups = group_switches_by_credentials(add_configs, self.log) for group_key, group_switches in credential_groups.items(): - username, password_hash, auth_proto, platform_type, preserve_config = group_key + ( + username, + password_hash, + auth_proto, + platform_type, + preserve_config, + ) = group_key password = group_switches[0].password pairs = [] @@ -2622,7 +2658,9 @@ def 
_handle_merged_state( if disc: pairs.append((cfg, disc)) else: - self.log.warning(f"No discovery data for {cfg.seed_ip}, skipping") + self.log.warning( + f"No discovery data for {cfg.seed_ip}, skipping" + ) if not pairs: continue @@ -2672,9 +2710,7 @@ def _handle_merged_state( # preserve_config=True the switches will NOT reload after being # added to the fabric. Passing this flag lets the wait utility # skip the unreachable/reload detection phases. - all_preserve_config = all( - cfg.preserve_config for _, cfg in switch_actions - ) + all_preserve_config = all(cfg.preserve_config for _, cfg in switch_actions) if all_preserve_config: self.log.info( "All switches in batch are brownfield (preserve_config=True) — " @@ -2798,7 +2834,10 @@ def _handle_overridden_state( self.results.action = "override" self.results.state = self.state self.results.operation_type = OperationType.CREATE - self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.response_current = { + "MESSAGE": "check mode — skipped", + "RETURN_CODE": 200, + } self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_delete": n_delete, @@ -2823,9 +2862,12 @@ def _handle_overridden_state( # Phase 2: Switches that need updating (delete-then-re-add) for sw in diff.get("to_update", []): existing_sw = next( - (e for e in self.existing - if e.switch_id == sw.switch_id - or e.fabric_management_ip == sw.fabric_management_ip), + ( + e + for e in self.existing + if e.switch_id == sw.switch_id + or e.fabric_management_ip == sw.fabric_management_ip + ), None, ) if existing_sw: @@ -2834,7 +2876,9 @@ def _handle_overridden_state( f"{existing_sw.fabric_management_ip} ({existing_sw.switch_id})" ) switches_to_delete.append(existing_sw) - self._log_operation("delete_for_update", existing_sw.fabric_management_ip) + self._log_operation( + "delete_for_update", existing_sw.fabric_management_ip + ) diff["to_add"].append(sw) @@ -2842,9 +2886,7 @@ 
def _handle_overridden_state( try: self.fabric_ops.bulk_delete(switches_to_delete) except SwitchOperationError as e: - msg = ( - f"Failed to delete switches during overridden state: {e}" - ) + msg = f"Failed to delete switches during overridden state: {e}" self.log.error(msg) self.nd.module.fail_json(msg=msg) for sw in switches_to_delete: @@ -2928,9 +2970,15 @@ def _handle_deleted_state( switches_to_delete: List[SwitchDataModel] = [] for switch_config in proposed_config: identifier = switch_config.seed_ip - self.log.debug(f"Looking for switch to delete with seed IP: {identifier}") + self.log.debug( + f"Looking for switch to delete with seed IP: {identifier}" + ) existing_switch = next( - (sw for sw in self.existing if sw.fabric_management_ip == identifier), + ( + sw + for sw in self.existing + if sw.fabric_management_ip == identifier + ), None, ) if existing_switch: @@ -2948,11 +2996,16 @@ def _handle_deleted_state( # Check mode — preview only if self.nd.module.check_mode: - self.log.info(f"Check mode: would delete {len(switches_to_delete)} switch(es)") + self.log.info( + f"Check mode: would delete {len(switches_to_delete)} switch(es)" + ) self.results.action = "delete" self.results.state = self.state self.results.operation_type = OperationType.DELETE - self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.response_current = { + "MESSAGE": "check mode — skipped", + "RETURN_CODE": 200, + } self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_delete": [sw.fabric_management_ip for sw in switches_to_delete], @@ -2986,10 +3039,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: try: result = self.nd.request(path=endpoint.path, verb=endpoint.verb) except Exception as e: - msg = ( - f"Failed to query switches from " - f"fabric '{self.fabric}': {e}" - ) + msg = f"Failed to query switches from " f"fabric '{self.fabric}': {e}" self.log.error(msg) 
self.nd.module.fail_json(msg=msg) @@ -3017,8 +3067,10 @@ def _log_operation(self, operation: str, identifier: str) -> None: Returns: None. """ - self.nd_logs.append({ - "operation": operation, - "identifier": identifier, - "status": "success", - }) + self.nd_logs.append( + { + "operation": operation, + "identifier": identifier, + "status": "success", + } + ) diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py index ed47393c..0f41f35c 100644 --- a/plugins/module_utils/manage_switches/utils.py +++ b/plugins/module_utils/manage_switches/utils.py @@ -35,7 +35,6 @@ SwitchOperationError, ) - # ========================================================================= # Payload Utilities # ========================================================================= @@ -140,9 +139,9 @@ def get_switch_field( if name in switch and switch[name] is not None: return switch[name] # Try camelCase variant - camel = ''.join( + camel = "".join( word.capitalize() if i > 0 else word - for i, word in enumerate(name.split('_')) + for i, word in enumerate(name.split("_")) ) if camel in switch and switch[camel] is not None: return switch[camel] @@ -160,20 +159,16 @@ def determine_operation_type(switch) -> str: ``'normal'``, ``'poap'``, or ``'rma'``. 
""" # Pydantic model with .operation_type attribute - if hasattr(switch, 'operation_type'): + if hasattr(switch, "operation_type"): return switch.operation_type if isinstance(switch, dict): - if 'poap' in switch or 'bootstrap' in switch: - return 'poap' - if ( - 'rma' in switch - or 'old_serial' in switch - or 'oldSerial' in switch - ): - return 'rma' + if "poap" in switch or "bootstrap" in switch: + return "poap" + if "rma" in switch or "old_serial" in switch or "oldSerial" in switch: + return "rma" - return 'normal' + return "normal" def group_switches_by_credentials( @@ -205,20 +200,17 @@ def group_switches_by_credentials( groups.setdefault(group_key, []).append(switch) log.info( - f"Grouped {len(switches)} switches into " - f"{len(groups)} credential group(s)" + f"Grouped {len(switches)} switches into " f"{len(groups)} credential group(s)" ) for idx, (key, group_switches) in enumerate(groups.items(), 1): username, _, auth_proto, platform_type, preserve_config = key auth_value = ( - auth_proto.value - if hasattr(auth_proto, 'value') - else str(auth_proto) + auth_proto.value if hasattr(auth_proto, "value") else str(auth_proto) ) platform_value = ( platform_type.value - if hasattr(platform_type, 'value') + if hasattr(platform_type, "value") else str(platform_type) ) log.debug( @@ -259,13 +251,11 @@ def query_bootstrap_switches( try: result = nd.request( - path=endpoint.path, verb=endpoint.verb, + path=endpoint.path, + verb=endpoint.verb, ) except Exception as e: - msg = ( - f"Failed to query bootstrap switches for " - f"fabric '{fabric}': {e}" - ) + msg = f"Failed to query bootstrap switches for " f"fabric '{fabric}': {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -276,10 +266,7 @@ def query_bootstrap_switches( else: switches = [] - log.info( - f"Bootstrap API returned {len(switches)} " - f"switch(es) in POAP loop" - ) + log.info(f"Bootstrap API returned {len(switches)} " f"switch(es) in POAP loop") log.debug("EXIT: query_bootstrap_switches()") return 
switches @@ -341,23 +328,30 @@ class SwitchWaitUtils: MANAGEABLE_STATUSES = frozenset({"ok", "manageable"}) # Status values indicating an operation is still in progress - IN_PROGRESS_STATUSES = frozenset({ - "inProgress", "migration", "discovering", "rediscovering", - }) + IN_PROGRESS_STATUSES = frozenset( + { + "inProgress", + "migration", + "discovering", + "rediscovering", + } + ) # Status values indicating failure - FAILED_STATUSES = frozenset({ - "failed", - "unreachable", - "authenticationFailed", - "timeout", - "discoveryTimeout", - "notReacheable", # Note: typo matches the API spec - "notAuthorized", - "unknownUserPassword", - "connectionError", - "sshSessionError", - }) + FAILED_STATUSES = frozenset( + { + "failed", + "unreachable", + "authenticationFailed", + "timeout", + "discoveryTimeout", + "notReacheable", # Note: typo matches the API spec + "notAuthorized", + "unknownUserPassword", + "connectionError", + "sshSessionError", + } + ) # Sleep multipliers for each phase _MIGRATION_SLEEP_FACTOR: float = 2.0 @@ -388,9 +382,7 @@ def __init__( self.log = logger or logging.getLogger("nd.SwitchWaitUtils") self.max_attempts = max_attempts or self.DEFAULT_MAX_ATTEMPTS self.wait_interval = wait_interval or self.DEFAULT_WAIT_INTERVAL - self.fabric_utils = ( - fabric_utils or FabricUtils(nd_module, fabric, self.log) - ) + self.fabric_utils = fabric_utils or FabricUtils(nd_module, fabric, self.log) # Pre-configure endpoints self.ep_switches_get = EpManageFabricsSwitchesGet() @@ -433,9 +425,7 @@ def wait_for_switch_manageable( Returns: ``True`` if all switches are manageable, ``False`` on timeout. 
""" - self.log.info( - f"Waiting for switches to become manageable: {serial_numbers}" - ) + self.log.info(f"Waiting for switches to become manageable: {serial_numbers}") # Phase 1 + 2: migration → normal if not self._wait_for_system_mode(serial_numbers): @@ -450,13 +440,9 @@ def wait_for_switch_manageable( return True # Phase 4: greenfield shortcut (skipped for POAP bootstrap) - if ( - not skip_greenfield_check - and self._is_greenfield_debug_enabled() - ): + if not skip_greenfield_check and self._is_greenfield_debug_enabled(): self.log.info( - "Greenfield debug flag enabled — " - "skipping reload detection" + "Greenfield debug flag enabled — " "skipping reload detection" ) return True @@ -467,15 +453,11 @@ def wait_for_switch_manageable( ) # Phase 5: wait for "unreachable" (switch is reloading) - if not self._wait_for_discovery_state( - serial_numbers, "unreachable" - ): + if not self._wait_for_discovery_state(serial_numbers, "unreachable"): return False # Phase 6: wait for "ok" (switch is ready) - return self._wait_for_discovery_state( - serial_numbers, "ok" - ) + return self._wait_for_discovery_state(serial_numbers, "ok") def wait_for_rma_switch_ready( self, @@ -534,25 +516,16 @@ def wait_for_discovery( for attempt in range(attempts): status = self._get_discovery_status(seed_ip) - if ( - status - and status.get("status") in self.MANAGEABLE_STATUSES - ): + if status and status.get("status") in self.MANAGEABLE_STATUSES: self.log.info(f"Discovery completed for {seed_ip}") return status - if ( - status - and status.get("status") in self.FAILED_STATUSES - ): - self.log.error( - f"Discovery failed for {seed_ip}: {status}" - ) + if status and status.get("status") in self.FAILED_STATUSES: + self.log.error(f"Discovery failed for {seed_ip}: {status}") return None self.log.debug( - f"Discovery attempt {attempt + 1}/{attempts} " - f"for {seed_ip}" + f"Discovery attempt {attempt + 1}/{attempts} " f"for {seed_ip}" ) time.sleep(interval) @@ -563,9 +536,7 @@ def 
wait_for_discovery( # Phase Helpers – System Mode # ===================================================================== - def _wait_for_system_mode( - self, serial_numbers: List[str] - ) -> bool: + def _wait_for_system_mode(self, serial_numbers: List[str]) -> bool: """Poll until all switches transition from migration mode to normal mode. Args: @@ -594,8 +565,7 @@ def _wait_for_system_mode( return False self.log.info( - "All switches in normal system mode — " - "proceeding to discovery checks" + "All switches in normal system mode — " "proceeding to discovery checks" ) return True @@ -618,11 +588,7 @@ def _poll_system_mode( Empty list on success, ``None`` on timeout or API error. """ pending = list(serial_numbers) - label = ( - f"exit '{target_mode}'" - if expect_match - else f"enter '{target_mode}'" - ) + label = f"exit '{target_mode}'" if expect_match else f"enter '{target_mode}'" for attempt in range(1, self.max_attempts + 1): if not pending: @@ -637,9 +603,7 @@ def _poll_system_mode( ) if not remaining: - self.log.info( - f"All switches {label} mode (attempt {attempt})" - ) + self.log.info(f"All switches {label} mode (attempt {attempt})") return remaining pending = remaining @@ -648,13 +612,9 @@ def _poll_system_mode( f"{len(pending)} switch(es) waiting to " f"{label}: {pending}" ) - time.sleep( - self.wait_interval * self._MIGRATION_SLEEP_FACTOR - ) + time.sleep(self.wait_interval * self._MIGRATION_SLEEP_FACTOR) - self.log.warning( - f"Timeout waiting for switches to {label}: {pending}" - ) + self.log.warning(f"Timeout waiting for switches to {label}: {pending}") return None # ===================================================================== @@ -681,26 +641,18 @@ def _filter_by_system_mode( Returns: Serial numbers still waiting. 
""" - switch_index = { - sw.get("serialNumber"): sw for sw in switch_data - } + switch_index = {sw.get("serialNumber"): sw for sw in switch_data} remaining: List[str] = [] for sn in serial_numbers: sw = switch_index.get(sn) if sw is None: remaining.append(sn) continue - mode = ( - sw.get("additionalData", {}) - .get("systemMode", "") - .lower() - ) + mode = sw.get("additionalData", {}).get("systemMode", "").lower() # expect_match=True: "still in target_mode" → not done # expect_match=False: "not yet in target_mode" → not done still_waiting = ( - (mode == target_mode) - if expect_match - else (mode != target_mode) + (mode == target_mode) if expect_match else (mode != target_mode) ) if still_waiting: remaining.append(sn) @@ -722,20 +674,14 @@ def _filter_by_discovery_status( Returns: Serial numbers still waiting. """ - switch_index = { - sw.get("serialNumber"): sw for sw in switch_data - } + switch_index = {sw.get("serialNumber"): sw for sw in switch_data} remaining: List[str] = [] for sn in serial_numbers: sw = switch_index.get(sn) if sw is None: remaining.append(sn) continue - status = ( - sw.get("additionalData", {}) - .get("discoveryStatus", "") - .lower() - ) + status = sw.get("additionalData", {}).get("discoveryStatus", "").lower() if status != target_state: remaining.append(sn) return remaining @@ -790,13 +736,10 @@ def _wait_for_discovery_state( f"{len(pending)} switch(es) not yet " f"'{target_state}': {pending}" ) - time.sleep( - self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR - ) + time.sleep(self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR) self.log.warning( - f"Timeout waiting for '{target_state}' state: " - f"{serial_numbers}" + f"Timeout waiting for '{target_state}' state: " f"{serial_numbers}" ) return False @@ -837,12 +780,8 @@ def _wait_for_switches_in_fabric( time.sleep(self.wait_interval) continue - known_serials = { - sw.get("serialNumber") for sw in switch_data - } - pending = [ - sn for sn in pending if sn not in known_serials - ] + 
known_serials = {sw.get("serialNumber") for sw in switch_data} + pending = [sn for sn in pending if sn not in known_serials] if not pending: self.log.info( @@ -857,9 +796,7 @@ def _wait_for_switches_in_fabric( ) time.sleep(self.wait_interval) - self.log.warning( - f"Timeout waiting for switches to appear in fabric: {pending}" - ) + self.log.warning(f"Timeout waiting for switches to appear in fabric: {pending}") return False def _fetch_switch_data( @@ -877,18 +814,14 @@ def _fetch_switch_data( ) switch_data = response.get("switches", []) if not switch_data: - self.log.error( - "No switch data returned for fabric" - ) + self.log.error("No switch data returned for fabric") return None return switch_data except Exception as e: self.log.error(f"Failed to fetch switch data: {e}") return None - def _trigger_rediscovery( - self, serial_numbers: List[str] - ) -> None: + def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: """POST a rediscovery request for the given switches. Args: @@ -898,9 +831,7 @@ def _trigger_rediscovery( return payload = {"switchIds": serial_numbers} - self.log.info( - f"Triggering rediscovery for: {serial_numbers}" - ) + self.log.info(f"Triggering rediscovery for: {serial_numbers}") try: self.nd.request( self.ep_rediscover.path, @@ -908,12 +839,11 @@ def _trigger_rediscovery( data=payload, ) except Exception as e: - self.log.warning( - f"Failed to trigger rediscovery: {e}" - ) + self.log.warning(f"Failed to trigger rediscovery: {e}") def _get_discovery_status( - self, seed_ip: str, + self, + seed_ip: str, ) -> Optional[Dict[str, Any]]: """GET discovery status for a single switch by IP. 
@@ -929,16 +859,11 @@ def _get_discovery_status( verb=self.ep_inventory_discover.verb, ) for switch in response.get("switches", []): - if ( - switch.get("ip") == seed_ip - or switch.get("ipaddr") == seed_ip - ): + if switch.get("ip") == seed_ip or switch.get("ipaddr") == seed_ip: return switch return None except Exception as e: - self.log.debug( - f"Discovery status check failed: {e}" - ) + self.log.debug(f"Discovery status check failed: {e}") return None def _is_greenfield_debug_enabled(self) -> bool: @@ -956,23 +881,15 @@ def _is_greenfield_debug_enabled(self) -> bool: try: fabric_info = self.fabric_utils.get_fabric_info() self.log.debug( - f"Fabric info retrieved for greenfield check: " - f"{fabric_info}" + f"Fabric info retrieved for greenfield check: " f"{fabric_info}" ) flag = ( - fabric_info - .get("management", {}) - .get("greenfieldDebugFlag", "") - .lower() - ) - self.log.debug( - f"Greenfield debug flag value: '{flag}'" + fabric_info.get("management", {}).get("greenfieldDebugFlag", "").lower() ) + self.log.debug(f"Greenfield debug flag value: '{flag}'") self._greenfield_debug_enabled = flag == "enable" except Exception as e: - self.log.debug( - f"Failed to get greenfield debug flag: {e}" - ) + self.log.debug(f"Failed to get greenfield debug flag: {e}") self._greenfield_debug_enabled = False return self._greenfield_debug_enabled diff --git a/plugins/module_utils/models/manage_switches/__init__.py b/plugins/module_utils/models/manage_switches/__init__.py index 83020728..c093d9e4 100644 --- a/plugins/module_utils/models/manage_switches/__init__.py +++ b/plugins/module_utils/models/manage_switches/__init__.py @@ -31,7 +31,9 @@ ) # --- Validators --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators # noqa: F401 +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) # noqa: F401 # --- Nested / shared models --- from 
ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( # noqa: F401 @@ -91,7 +93,6 @@ SwitchConfigModel, ) - __all__ = [ # Enums "AdvisoryLevel", diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index 224d7fa9..f259cc6c 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -18,32 +18,34 @@ from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( + NDNestedModel, +) from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( RemoteCredentialStore, SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) class BootstrapBaseData(NDNestedModel): """ Device-reported data embedded in a bootstrap API entry. 
""" + identifiers: ClassVar[List[str]] = [] gateway_ip_mask: Optional[str] = Field( - default=None, - alias="gatewayIpMask", - description="Gateway IP address with mask" + default=None, alias="gatewayIpMask", description="Gateway IP address with mask" ) models: Optional[List[str]] = Field( - default=None, - description="Supported models for switch" + default=None, description="Supported models for switch" ) - @field_validator('gateway_ip_mask', mode='before') + @field_validator("gateway_ip_mask", mode="before") @classmethod def validate_gateway(cls, v: Optional[str]) -> Optional[str]: return SwitchValidators.validate_cidr(v) @@ -53,37 +55,31 @@ class BootstrapBaseModel(NDBaseModel): """ Common hardware and policy properties shared across bootstrap, pre-provision, and RMA operations. """ + identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" gateway_ip_mask: str = Field( - ..., - alias="gatewayIpMask", - description="Gateway IP address with mask" - ) - model: str = Field( - ..., - description="Model of the bootstrap switch" + ..., alias="gatewayIpMask", description="Gateway IP address with mask" ) + model: str = Field(..., description="Model of the bootstrap switch") software_version: str = Field( ..., alias="softwareVersion", - description="Software version of the bootstrap switch" + description="Software version of the bootstrap switch", ) image_policy: Optional[str] = Field( default=None, alias="imagePolicy", - description="Image policy associated with the switch during bootstrap" - ) - switch_role: Optional[SwitchRole] = Field( - default=None, - alias="switchRole" + description="Image policy associated with the switch during bootstrap", ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") data: 
Optional[BootstrapBaseData] = Field( - default=None, - description="Additional bootstrap data" + default=None, description="Additional bootstrap data" ) - @field_validator('gateway_ip_mask', mode='before') + @field_validator("gateway_ip_mask", mode="before") @classmethod def validate_gateway(cls, v: str) -> str: result = SwitchValidators.validate_cidr(v) @@ -99,44 +95,45 @@ class BootstrapCredentialModel(NDBaseModel): When useNewCredentials is true, separate discovery credentials are used for post-bootstrap switch discovery instead of the admin password. """ + identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] password: str = Field( - ..., - description="Switch password to be set during bootstrap for admin user" + ..., description="Switch password to be set during bootstrap for admin user" ) discovery_auth_protocol: SnmpV3AuthProtocol = Field( - ..., - alias="discoveryAuthProtocol" + ..., alias="discoveryAuthProtocol" ) use_new_credentials: bool = Field( default=False, alias="useNewCredentials", - description="If True, use discoveryUsername and discoveryPassword" + description="If True, use discoveryUsername and discoveryPassword", ) discovery_username: Optional[str] = Field( default=None, alias="discoveryUsername", - description="Username to be used for switch discovery post bootstrap" + description="Username to be used for switch discovery post bootstrap", ) discovery_password: Optional[str] = Field( default=None, alias="discoveryPassword", - description="Password associated with the corresponding switch discovery user" + description="Password associated with the corresponding switch discovery user", ) remote_credential_store: RemoteCredentialStore = Field( 
default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore", - description="Type of credential store for discovery credentials" + description="Type of credential store for discovery credentials", ) remote_credential_store_key: Optional[str] = Field( default=None, alias="remoteCredentialStoreKey", - description="Remote credential store key for discovery credentials" + description="Remote credential store key for discovery credentials", ) - @model_validator(mode='after') + @model_validator(mode="after") def validate_credentials(self) -> Self: """Validate credential configuration logic.""" if self.use_new_credentials: @@ -159,48 +156,33 @@ class BootstrapImportSpecificModel(NDBaseModel): """ Switch-identifying fields returned by the bootstrap GET API prior to import. """ + identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" - hostname: str = Field( - ..., - description="Hostname of the bootstrap switch" - ) - ip: str = Field( - ..., - description="IP address of the bootstrap switch" - ) + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" + hostname: str = Field(..., description="Hostname of the bootstrap switch") + ip: str = Field(..., description="IP address of the bootstrap switch") serial_number: str = Field( - ..., - alias="serialNumber", - description="Serial number of the bootstrap switch" + ..., alias="serialNumber", description="Serial number of the bootstrap switch" ) in_inventory: bool = Field( ..., alias="inInventory", - description="True if the bootstrap switch is in inventory" - ) - public_key: str = Field( - ..., - alias="publicKey", - description="Public Key" - ) - finger_print: str = Field( - ..., - alias="fingerPrint", - description="Fingerprint" + description="True if the bootstrap switch is in inventory", ) + public_key: str = Field(..., alias="publicKey", 
description="Public Key") + finger_print: str = Field(..., alias="fingerPrint", description="Fingerprint") dhcp_bootstrap_ip: Optional[str] = Field( default=None, alias="dhcpBootstrapIp", - description="This is used for device day-0 bring-up when using inband reachability" + description="This is used for device day-0 bring-up when using inband reachability", ) seed_switch: bool = Field( - default=False, - alias="seedSwitch", - description="Use as seed switch" + default=False, alias="seedSwitch", description="Use as seed switch" ) - @field_validator('hostname', mode='before') + @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: result = SwitchValidators.validate_hostname(v) @@ -208,14 +190,14 @@ def validate_host(cls, v: str) -> str: raise ValueError("hostname cannot be empty") return result - @field_validator('ip', 'dhcp_bootstrap_ip', mode='before') + @field_validator("ip", "dhcp_bootstrap_ip", mode="before") @classmethod def validate_ip(cls, v: Optional[str]) -> Optional[str]: if v is None: return None return SwitchValidators.validate_ip_address(v) - @field_validator('serial_number', mode='before') + @field_validator("serial_number", mode="before") @classmethod def validate_serial(cls, v: str) -> str: result = SwitchValidators.validate_serial_number(v) @@ -230,97 +212,73 @@ class BootstrapImportSwitchModel(NDBaseModel): Path: POST /fabrics/{fabricName}/switchActions/importBootstrap """ + identifiers: ClassVar[List[str]] = ["serial_number"] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] serial_number: str = Field( - ..., - alias="serialNumber", - description="Serial number of the bootstrap switch" - ) - model: str = Field( - ..., - description="Model of the 
bootstrap switch" + ..., alias="serialNumber", description="Serial number of the bootstrap switch" ) + model: str = Field(..., description="Model of the bootstrap switch") software_version: str = Field( ..., alias="softwareVersion", - description="Software version of the bootstrap switch" - ) - hostname: str = Field( - ..., - description="Hostname of the bootstrap switch" - ) - ip: str = Field( - ..., - description="IP address of the bootstrap switch" + description="Software version of the bootstrap switch", ) + hostname: str = Field(..., description="Hostname of the bootstrap switch") + ip: str = Field(..., description="IP address of the bootstrap switch") password: str = Field( - ..., - description="Switch password to be set during bootstrap for admin user" + ..., description="Switch password to be set during bootstrap for admin user" ) discovery_auth_protocol: SnmpV3AuthProtocol = Field( - ..., - alias="discoveryAuthProtocol" - ) - discovery_username: Optional[str] = Field( - default=None, - alias="discoveryUsername" - ) - discovery_password: Optional[str] = Field( - default=None, - alias="discoveryPassword" + ..., alias="discoveryAuthProtocol" ) + discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") + discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") remote_credential_store: RemoteCredentialStore = Field( default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore", - description="Type of credential store for discovery credentials" + description="Type of credential store for discovery credentials", ) remote_credential_store_key: Optional[str] = Field( default=None, alias="remoteCredentialStoreKey", - description="Remote credential store key for discovery credentials" + description="Remote credential store key for discovery credentials", ) data: Optional[Dict[str, Any]] = Field( default=None, - description="Bootstrap configuration data block (gatewayIpMask, models)" + description="Bootstrap 
configuration data block (gatewayIpMask, models)", ) fingerprint: str = Field( default="", alias="fingerPrint", - description="SSH fingerprint from bootstrap GET API" + description="SSH fingerprint from bootstrap GET API", ) public_key: str = Field( default="", alias="publicKey", - description="SSH public key from bootstrap GET API" + description="SSH public key from bootstrap GET API", ) re_add: bool = Field( default=False, alias="reAdd", - description="Whether to re-add an already-seen switch" - ) - in_inventory: bool = Field( - default=False, - alias="inInventory" + description="Whether to re-add an already-seen switch", ) + in_inventory: bool = Field(default=False, alias="inInventory") image_policy: Optional[str] = Field( default=None, alias="imagePolicy", - description="Image policy associated with the switch during bootstrap" - ) - switch_role: Optional[SwitchRole] = Field( - default=None, - alias="switchRole" + description="Image policy associated with the switch during bootstrap", ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") gateway_ip_mask: str = Field( - ..., - alias="gatewayIpMask", - description="Gateway IP address with mask" + ..., alias="gatewayIpMask", description="Gateway IP address with mask" ) - @field_validator('ip', mode='before') + @field_validator("ip", mode="before") @classmethod def validate_ip_field(cls, v: str) -> str: result = SwitchValidators.validate_ip_address(v) @@ -328,7 +286,7 @@ def validate_ip_field(cls, v: str) -> str: raise ValueError(f"Invalid IP address: {v}") return result - @field_validator('hostname', mode='before') + @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: result = SwitchValidators.validate_hostname(v) @@ -336,7 +294,7 @@ def validate_host(cls, v: str) -> str: raise ValueError("hostname cannot be empty") return result - @field_validator('serial_number', mode='before') + @field_validator("serial_number", mode="before") @classmethod 
def validate_serial(cls, v: str) -> str: result = SwitchValidators.validate_serial_number(v) @@ -366,18 +324,18 @@ class ImportBootstrapSwitchesRequestModel(NDBaseModel): Path: POST /fabrics/{fabricName}/switchActions/importBootstrap """ + identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" switches: List[BootstrapImportSwitchModel] = Field( - ..., - description="PowerOn Auto Provisioning switches" + ..., description="PowerOn Auto Provisioning switches" ) def to_payload(self) -> Dict[str, Any]: """Convert to API payload format.""" - return { - "switches": [s.to_payload() for s in self.switches] - } + return {"switches": [s.to_payload() for s in self.switches]} __all__ = [ diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 2f6873c1..0f612b39 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -17,19 +17,29 @@ import socket from ipaddress import ip_address -from pydantic import Field, ValidationInfo, computed_field, field_validator, model_validator +from pydantic import ( + Field, + ValidationInfo, + computed_field, + field_validator, + model_validator, +) from typing import Any, Dict, List, Optional, ClassVar, Literal, Union from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( + NDNestedModel, +) from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( PlatformType, SnmpV3AuthProtocol, 
SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) class ConfigDataModel(NDNestedModel): @@ -38,20 +48,20 @@ class ConfigDataModel(NDNestedModel): Maps to config.poap.config_data and config.rma.config_data in the playbook. """ + identifiers: ClassVar[List[str]] = [] models: List[str] = Field( ..., alias="models", min_length=1, - description="List of model of modules in switch to Bootstrap/Pre-provision/RMA" + description="List of model of modules in switch to Bootstrap/Pre-provision/RMA", ) gateway: str = Field( - ..., - description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)" + ..., description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)" ) - @field_validator('models', mode='before') + @field_validator("models", mode="before") @classmethod def validate_models_list(cls, v: Any) -> List[str]: """Validate models is a non-empty list of strings.""" @@ -74,7 +84,7 @@ def validate_models_list(cls, v: Any) -> List[str]: ) return v - @field_validator('gateway', mode='before') + @field_validator("gateway", mode="before") @classmethod def validate_gateway(cls, v: str) -> str: """Validate gateway is a valid CIDR.""" @@ -92,6 +102,7 @@ class POAPConfigModel(NDNestedModel): If the bootstrap API reports a different hostname or role, the API value overrides the user-provided value and a warning is logged. 
""" + identifiers: ClassVar[List[str]] = [] # Mandatory @@ -99,31 +110,28 @@ class POAPConfigModel(NDNestedModel): ..., alias="serialNumber", min_length=1, - description="Serial number of the physical switch to Bootstrap" - ) - hostname: str = Field( - ..., - description="Hostname for the switch during bootstrap" + description="Serial number of the physical switch to Bootstrap", ) + hostname: str = Field(..., description="Hostname for the switch during bootstrap") # Optional discovery_username: Optional[str] = Field( default=None, alias="discoveryUsername", - description="Username for device discovery during POAP" + description="Username for device discovery during POAP", ) discovery_password: Optional[str] = Field( default=None, alias="discoveryPassword", - description="Password for device discovery during POAP" + description="Password for device discovery during POAP", ) image_policy: Optional[str] = Field( default=None, alias="imagePolicy", - description="Name of the image policy to be applied on switch" + description="Name of the image policy to be applied on switch", ) - @model_validator(mode='after') + @model_validator(mode="after") def validate_discovery_credentials_pair(self) -> Self: """Validate that discovery_username and discovery_password are both set or both absent.""" has_user = bool(self.discovery_username) @@ -138,7 +146,7 @@ def validate_discovery_credentials_pair(self) -> Self: ) return self - @field_validator('serial_number', mode='before') + @field_validator("serial_number", mode="before") @classmethod def validate_serial_number_field(cls, v: str) -> str: """Validate serial_number is not empty.""" @@ -156,6 +164,7 @@ class PreprovisionConfigModel(NDNestedModel): and ``config_data`` — are mandatory because the controller has no physical switch to pull these values from. 
""" + identifiers: ClassVar[List[str]] = [] # Mandatory @@ -163,21 +172,16 @@ class PreprovisionConfigModel(NDNestedModel): ..., alias="serialNumber", min_length=1, - description="Serial number of switch to Pre-provision" + description="Serial number of switch to Pre-provision", ) model: str = Field( - ..., - min_length=1, - description="Model of switch to Pre-provision" + ..., min_length=1, description="Model of switch to Pre-provision" ) version: str = Field( - ..., - min_length=1, - description="Software version of switch to Pre-provision" + ..., min_length=1, description="Software version of switch to Pre-provision" ) hostname: str = Field( - ..., - description="Hostname for the switch during pre-provision" + ..., description="Hostname for the switch during pre-provision" ) config_data: ConfigDataModel = Field( ..., @@ -192,20 +196,20 @@ class PreprovisionConfigModel(NDNestedModel): discovery_username: Optional[str] = Field( default=None, alias="discoveryUsername", - description="Username for device discovery during pre-provision" + description="Username for device discovery during pre-provision", ) discovery_password: Optional[str] = Field( default=None, alias="discoveryPassword", - description="Password for device discovery during pre-provision" + description="Password for device discovery during pre-provision", ) image_policy: Optional[str] = Field( default=None, alias="imagePolicy", - description="Image policy to apply during pre-provision" + description="Image policy to apply during pre-provision", ) - @model_validator(mode='after') + @model_validator(mode="after") def validate_discovery_credentials_pair(self) -> Self: """Validate that discovery_username and discovery_password are both set or both absent.""" has_user = bool(self.discovery_username) @@ -220,7 +224,7 @@ def validate_discovery_credentials_pair(self) -> Self: ) return self - @field_validator('serial_number', mode='before') + @field_validator("serial_number", mode="before") @classmethod def 
validate_serial_number_field(cls, v: str) -> str: """Validate serial_number is not empty.""" @@ -237,18 +241,19 @@ class RMAConfigModel(NDNestedModel): The switch being replaced must be in maintenance mode and either shut down or disconnected from the network before initiating the RMA operation. """ + identifiers: ClassVar[List[str]] = [] # Discovery credentials discovery_username: Optional[str] = Field( default=None, alias="discoveryUsername", - description="Username for device discovery during POAP and RMA discovery" + description="Username for device discovery during POAP and RMA discovery", ) discovery_password: Optional[str] = Field( default=None, alias="discoveryPassword", - description="Password for device discovery during POAP and RMA discovery" + description="Password for device discovery during POAP and RMA discovery", ) # Required fields for RMA @@ -256,30 +261,30 @@ class RMAConfigModel(NDNestedModel): ..., alias="newSerialNumber", min_length=1, - description="Serial number of the new/replacement switch to Bootstrap for RMA" + description="Serial number of the new/replacement switch to Bootstrap for RMA", ) old_serial_number: str = Field( ..., alias="oldSerialNumber", min_length=1, - description="Serial number of the existing switch to be replaced by RMA" + description="Serial number of the existing switch to be replaced by RMA", ) model: Optional[str] = Field( default=None, min_length=1, - description="Model of switch to Bootstrap for RMA. If omitted, sourced from bootstrap API." + description="Model of switch to Bootstrap for RMA. If omitted, sourced from bootstrap API.", ) version: Optional[str] = Field( default=None, min_length=1, - description="Software version of switch to Bootstrap for RMA. If omitted, sourced from bootstrap API." + description="Software version of switch to Bootstrap for RMA. 
If omitted, sourced from bootstrap API.", ) # Optional fields image_policy: Optional[str] = Field( default=None, alias="imagePolicy", - description="Name of the image policy to be applied on switch during Bootstrap for RMA" + description="Name of the image policy to be applied on switch during Bootstrap for RMA", ) # Optional config data for RMA (models list + gateway); sourced from bootstrap API if omitted @@ -293,7 +298,7 @@ class RMAConfigModel(NDNestedModel): ), ) - @field_validator('new_serial_number', 'old_serial_number', mode='before') + @field_validator("new_serial_number", "old_serial_number", mode="before") @classmethod def validate_serial_numbers(cls, v: str) -> str: """Validate serial numbers are not empty.""" @@ -302,7 +307,7 @@ def validate_serial_numbers(cls, v: str) -> str: raise ValueError("Serial number cannot be empty") return result - @model_validator(mode='after') + @model_validator(mode="after") def validate_discovery_credentials_pair(self) -> Self: """Validate that discovery_username and discovery_password are both set or both absent. @@ -331,12 +336,19 @@ class SwitchConfigModel(NDBaseModel): (both poap+preprovision), and RMA operations. The operation type is derived from the presence of poap, preprovision, and/or rma fields. 
""" + identifiers: ClassVar[List[str]] = ["seed_ip"] # Fields excluded from diff — only seed_ip + role are compared exclude_from_diff: ClassVar[List[str]] = [ - "username", "password", "auth_proto", - "preserve_config", "platform_type", "poap", "preprovision", "rma", + "username", + "password", + "auth_proto", + "preserve_config", + "platform_type", + "poap", + "preprovision", + "rma", "operation_type", ] @@ -345,59 +357,61 @@ class SwitchConfigModel(NDBaseModel): ..., alias="seedIp", min_length=1, - description="Seed IP address or DNS name of the switch" + description="Seed IP address or DNS name of the switch", ) # Optional fields — required for merged/overridden, optional for query/deleted username: Optional[str] = Field( default=None, alias="userName", - description="Login username to the switch (required for merged/overridden states)" + description="Login username to the switch (required for merged/overridden states)", ) password: Optional[str] = Field( default=None, - description="Login password to the switch (required for merged/overridden states)" + description="Login password to the switch (required for merged/overridden states)", ) # Optional fields with defaults auth_proto: SnmpV3AuthProtocol = Field( default=SnmpV3AuthProtocol.MD5, alias="authProto", - description="Authentication protocol to use" + description="Authentication protocol to use", ) role: Optional[SwitchRole] = Field( default=None, - description="Role to assign to the switch. None means not specified (uses controller default)." + description="Role to assign to the switch. 
None means not specified (uses controller default).", ) preserve_config: bool = Field( default=False, alias="preserveConfig", - description="Set to false for greenfield, true for brownfield deployment" + description="Set to false for greenfield, true for brownfield deployment", ) platform_type: PlatformType = Field( default=PlatformType.NX_OS, alias="platformType", - description="Platform type of the switch (nx-os, ios-xe, etc.)" + description="Platform type of the switch (nx-os, ios-xe, etc.)", ) # POAP, Pre-provision and RMA configurations poap: Optional[POAPConfigModel] = Field( default=None, - description="Bootstrap POAP config (serial_number + hostname mandatory)" + description="Bootstrap POAP config (serial_number + hostname mandatory)", ) preprovision: Optional[PreprovisionConfigModel] = Field( default=None, - description="Pre-provision config (serial_number, model, version, hostname, config_data all mandatory)" + description="Pre-provision config (serial_number, model, version, hostname, config_data all mandatory)", ) rma: Optional[List[RMAConfigModel]] = Field( default=None, - description="RMA (Return Material Authorization) configurations for switch replacement" + description="RMA (Return Material Authorization) configurations for switch replacement", ) # Computed fields @computed_field @property - def operation_type(self) -> Literal["normal", "poap", "preprovision", "swap", "rma"]: + def operation_type( + self, + ) -> Literal["normal", "poap", "preprovision", "swap", "rma"]: """Determine the operation type from this config. Returns: @@ -424,15 +438,22 @@ def to_config_dict(self) -> Dict[str, Any]: Dict of config fields with ``username``, ``password``, ``discovery_username``, and ``discovery_password`` excluded. 
""" - return self.to_config(exclude={ - "username": True, - "password": True, - "poap": {"discovery_username": True, "discovery_password": True}, - "preprovision": {"discovery_username": True, "discovery_password": True}, - "rma": {"__all__": {"discovery_username": True, "discovery_password": True}}, - }) - - @model_validator(mode='after') + return self.to_config( + exclude={ + "username": True, + "password": True, + "poap": {"discovery_username": True, "discovery_password": True}, + "preprovision": { + "discovery_username": True, + "discovery_password": True, + }, + "rma": { + "__all__": {"discovery_username": True, "discovery_password": True} + }, + } + ) + + @model_validator(mode="after") def reject_auth_proto_for_special_ops(self) -> Self: """Reject non-MD5 auth_proto when POAP, Pre-provision, Swap or RMA is configured. @@ -440,7 +461,9 @@ def reject_auth_proto_for_special_ops(self) -> Self: all inputs have already been coerced by Pydantic into a typed SnmpV3AuthProtocol value, so a direct enum comparison is safe. """ - if (self.poap or self.preprovision or self.rma) and self.auth_proto != SnmpV3AuthProtocol.MD5: + if ( + self.poap or self.preprovision or self.rma + ) and self.auth_proto != SnmpV3AuthProtocol.MD5: if self.poap or self.preprovision: op = "POAP/Pre-provision" else: @@ -452,7 +475,7 @@ def reject_auth_proto_for_special_ops(self) -> Self: ) return self - @model_validator(mode='after') + @model_validator(mode="after") def validate_special_ops_exclusion(self) -> Self: """Validate mutually exclusive operation combinations. 
@@ -471,7 +494,7 @@ def validate_special_ops_exclusion(self) -> Self: ) return self - @model_validator(mode='after') + @model_validator(mode="after") def validate_special_ops_credentials(self) -> Self: """Validate credentials for POAP, Pre-provision, Swap and RMA operations.""" if self.poap or self.preprovision or self.rma: @@ -485,7 +508,7 @@ def validate_special_ops_credentials(self) -> Self: ) return self - @model_validator(mode='after') + @model_validator(mode="after") def apply_state_defaults(self, info: ValidationInfo) -> Self: """Apply state-aware defaults and enforcement using validation context. @@ -523,7 +546,7 @@ def apply_state_defaults(self, info: ValidationInfo) -> Self: ) return self - @field_validator('seed_ip', mode='before') + @field_validator("seed_ip", mode="before") @classmethod def validate_seed_ip(cls, v: str) -> str: """Resolve seed_ip to an IP address. @@ -558,7 +581,7 @@ def validate_seed_ip(cls, v: str) -> str: f"'{v}' is not a valid IP address and could not be resolved via DNS" ) - @field_validator('rma', mode='before') + @field_validator("rma", mode="before") @classmethod def validate_rma_list_not_empty(cls, v: Optional[List]) -> Optional[List]: """Validate that if RMA list is provided, it is not empty.""" @@ -566,13 +589,15 @@ def validate_rma_list_not_empty(cls, v: Optional[List]) -> Optional[List]: raise ValueError("RMA list cannot be empty if provided") return v - @field_validator('auth_proto', mode='before') + @field_validator("auth_proto", mode="before") @classmethod - def normalize_auth_proto(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + def normalize_auth_proto( + cls, v: Union[str, SnmpV3AuthProtocol, None] + ) -> SnmpV3AuthProtocol: """Normalize auth_proto to handle case-insensitive input (MD5, md5, etc.).""" return SnmpV3AuthProtocol.normalize(v) - @field_validator('role', mode='before') + @field_validator("role", mode="before") @classmethod def normalize_role(cls, v: Union[str, SwitchRole, None]) -> 
Optional[SwitchRole]: """Normalize role for case-insensitive and underscore-to-camelCase matching. @@ -581,7 +606,7 @@ def normalize_role(cls, v: Union[str, SwitchRole, None]) -> Optional[SwitchRole] return None return SwitchRole.normalize(v) - @field_validator('platform_type', mode='before') + @field_validator("platform_type", mode="before") @classmethod def normalize_platform_type(cls, v: Union[str, PlatformType, None]) -> PlatformType: """Normalize platform_type for case-insensitive matching (NX_OS, nx-os, etc.).""" @@ -644,13 +669,15 @@ def to_gathered_dict(self) -> Dict[str, Any]: Dict with seed_ip, role, auth_proto, preserve_config, username set to ``""``, password set to ``""``. """ - result = self.to_config(exclude={ - "platform_type": True, - "poap": True, - "preprovision": True, - "rma": True, - "operation_type": True, - }) + result = self.to_config( + exclude={ + "platform_type": True, + "poap": True, + "preprovision": True, + "rma": True, + "operation_type": True, + } + ) result["username"] = "" result["password"] = "" return result diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py index 1475edf8..21188515 100644 --- a/plugins/module_utils/models/manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -26,7 +26,9 @@ SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) class ShallowDiscoveryRequestModel(NDBaseModel): @@ -35,52 +37,47 @@ class ShallowDiscoveryRequestModel(NDBaseModel): Path: POST /fabrics/{fabricName}/actions/shallowDiscovery """ + identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = 
"singleton" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password"] seed_ip_collection: List[str] = Field( ..., alias="seedIpCollection", min_length=1, - description="Seed switch IP collection" - ) - max_hop: int = Field( - default=2, - alias="maxHop", - ge=0, - le=7, - description="Max hop" + description="Seed switch IP collection", ) + max_hop: int = Field(default=2, alias="maxHop", ge=0, le=7, description="Max hop") platform_type: ShallowDiscoveryPlatformType = Field( default=ShallowDiscoveryPlatformType.NX_OS, alias="platformType", - description="Switch platform type (apic is not supported for shallow discovery)" + description="Switch platform type (apic is not supported for shallow discovery)", ) snmp_v3_auth_protocol: SnmpV3AuthProtocol = Field( default=SnmpV3AuthProtocol.MD5, alias="snmpV3AuthProtocol", - description="SNMPv3 authentication protocols" + description="SNMPv3 authentication protocols", ) username: Optional[str] = Field( - default=None, - description="User name for switch login" + default=None, description="User name for switch login" ) password: Optional[str] = Field( - default=None, - description="User password for switch login" + default=None, description="User password for switch login" ) remote_credential_store: Optional[RemoteCredentialStore] = Field( default=None, alias="remoteCredentialStore", - description="Type of credential store" + description="Type of credential store", ) remote_credential_store_key: Optional[str] = Field( default=None, alias="remoteCredentialStoreKey", - description="Remote credential store key" + description="Remote credential store key", ) - @field_validator('seed_ip_collection', mode='before') + @field_validator("seed_ip_collection", mode="before") @classmethod def validate_seed_ips(cls, v: List[str]) -> List[str]: """Validate all seed IPs.""" @@ -95,15 +92,19 @@ def validate_seed_ips(cls, v: 
List[str]) -> List[str]: raise ValueError("No valid seed IPs provided") return validated - @field_validator('snmp_v3_auth_protocol', mode='before') + @field_validator("snmp_v3_auth_protocol", mode="before") @classmethod - def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + def normalize_snmp_auth( + cls, v: Union[str, SnmpV3AuthProtocol, None] + ) -> SnmpV3AuthProtocol: """Normalize SNMP auth protocol (case-insensitive).""" return SnmpV3AuthProtocol.normalize(v) - @field_validator('platform_type', mode='before') + @field_validator("platform_type", mode="before") @classmethod - def normalize_platform(cls, v: Union[str, ShallowDiscoveryPlatformType, None]) -> ShallowDiscoveryPlatformType: + def normalize_platform( + cls, v: Union[str, ShallowDiscoveryPlatformType, None] + ) -> ShallowDiscoveryPlatformType: """Normalize platform type (case-insensitive).""" return ShallowDiscoveryPlatformType.normalize(v) @@ -114,48 +115,36 @@ class SwitchDiscoveryModel(NDBaseModel): For N7K user VDC deployments, the serial number format is serialNumber:vDCName. 
""" + identifiers: ClassVar[List[str]] = ["serial_number"] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" - hostname: str = Field( - ..., - description="Switch host name" - ) - ip: str = Field( - ..., - description="Switch IPv4/v6 address" - ) + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "single" + hostname: str = Field(..., description="Switch host name") + ip: str = Field(..., description="Switch IPv4/v6 address") serial_number: str = Field( - ..., - alias="serialNumber", - description="Switch serial number" - ) - model: str = Field( - ..., - description="Switch model" + ..., alias="serialNumber", description="Switch serial number" ) + model: str = Field(..., description="Switch model") software_version: Optional[str] = Field( - default=None, - alias="softwareVersion", - description="Switch software version" + default=None, alias="softwareVersion", description="Switch software version" ) vdc_id: Optional[int] = Field( default=None, alias="vdcId", ge=0, - description="N7K VDC ID. Mandatory for N7K switch discovery" + description="N7K VDC ID. Mandatory for N7K switch discovery", ) vdc_mac: Optional[str] = Field( default=None, alias="vdcMac", - description="N7K VDC Mac address. Mandatory for N7K switch discovery" + description="N7K VDC Mac address. 
Mandatory for N7K switch discovery", ) switch_role: Optional[SwitchRole] = Field( - default=None, - alias="switchRole", - description="Switch role" + default=None, alias="switchRole", description="Switch role" ) - @field_validator('hostname', mode='before') + @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: result = SwitchValidators.validate_hostname(v) @@ -163,7 +152,7 @@ def validate_host(cls, v: str) -> str: raise ValueError("hostname cannot be empty") return result - @field_validator('ip', mode='before') + @field_validator("ip", mode="before") @classmethod def validate_ip(cls, v: str) -> str: result = SwitchValidators.validate_ip_address(v) @@ -171,7 +160,7 @@ def validate_ip(cls, v: str) -> str: raise ValueError("ip cannot be empty") return result - @field_validator('serial_number', mode='before') + @field_validator("serial_number", mode="before") @classmethod def validate_serial(cls, v: str) -> str: result = SwitchValidators.validate_serial_number(v) @@ -179,7 +168,7 @@ def validate_serial(cls, v: str) -> str: raise ValueError("serial_number cannot be empty") return result - @field_validator('vdc_mac', mode='before') + @field_validator("vdc_mac", mode="before") @classmethod def validate_mac(cls, v: Optional[str]) -> Optional[str]: return SwitchValidators.validate_mac_address(v) @@ -191,71 +180,71 @@ class AddSwitchesRequestModel(NDBaseModel): Path: POST /fabrics/{fabricName}/switches """ + identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password"] switches: List[SwitchDiscoveryModel] = Field( - ..., - min_length=1, - description="The list of switches to be imported" + ..., min_length=1, description="The list of switches to be imported" ) 
platform_type: PlatformType = Field( default=PlatformType.NX_OS, alias="platformType", - description="Switch platform type" + description="Switch platform type", ) preserve_config: bool = Field( default=True, alias="preserveConfig", - description="Flag to preserve the switch configuration after import" + description="Flag to preserve the switch configuration after import", ) snmp_v3_auth_protocol: SnmpV3AuthProtocol = Field( default=SnmpV3AuthProtocol.MD5, alias="snmpV3AuthProtocol", - description="SNMPv3 authentication protocols" + description="SNMPv3 authentication protocols", ) use_credential_for_write: Optional[bool] = Field( default=None, alias="useCredentialForWrite", - description="Flag to use the discovery credential as LAN credential" + description="Flag to use the discovery credential as LAN credential", ) username: Optional[str] = Field( - default=None, - description="User name for switch login" + default=None, description="User name for switch login" ) password: Optional[str] = Field( - default=None, - description="User password for switch login" + default=None, description="User password for switch login" ) remote_credential_store: Optional[RemoteCredentialStore] = Field( default=None, alias="remoteCredentialStore", - description="Type of credential store" + description="Type of credential store", ) remote_credential_store_key: Optional[str] = Field( default=None, alias="remoteCredentialStoreKey", - description="Remote credential store key" + description="Remote credential store key", ) def to_payload(self) -> Dict[str, Any]: """Convert to API payload format.""" payload = self.model_dump(by_alias=True, exclude_none=True) # Convert nested switches to payload format - if 'switches' in payload: - payload['switches'] = [ - s.to_payload() if hasattr(s, 'to_payload') else s - for s in self.switches + if "switches" in payload: + payload["switches"] = [ + s.to_payload() if hasattr(s, "to_payload") else s for s in self.switches ] return payload - 
@field_validator('snmp_v3_auth_protocol', mode='before') + @field_validator("snmp_v3_auth_protocol", mode="before") @classmethod - def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + def normalize_snmp_auth( + cls, v: Union[str, SnmpV3AuthProtocol, None] + ) -> SnmpV3AuthProtocol: """Normalize SNMP auth protocol (case-insensitive: MD5, md5, etc.).""" return SnmpV3AuthProtocol.normalize(v) - @field_validator('platform_type', mode='before') + @field_validator("platform_type", mode="before") @classmethod def normalize_platform_type(cls, v: Union[str, PlatformType, None]) -> PlatformType: """Normalize platform type (case-insensitive: NX_OS, nx-os, etc.).""" diff --git a/plugins/module_utils/models/manage_switches/enums.py b/plugins/module_utils/models/manage_switches/enums.py index edb8f28a..a79a0ec8 100644 --- a/plugins/module_utils/models/manage_switches/enums.py +++ b/plugins/module_utils/models/manage_switches/enums.py @@ -16,11 +16,11 @@ from enum import Enum from typing import List, Union - # ============================================================================= # ENUMS - Extracted from OpenAPI Schema components/schemas # ============================================================================= + class SwitchRole(str, Enum): """ Switch role enumeration. 
@@ -28,6 +28,7 @@ class SwitchRole(str, Enum): Based on: components/schemas/switchRole Description: The role of the switch, meta is a read-only switch role """ + BORDER = "border" BORDER_GATEWAY = "borderGateway" BORDER_GATEWAY_SPINE = "borderGatewaySpine" @@ -65,12 +66,14 @@ def from_user_input(cls, value: str) -> "SwitchRole": except ValueError: pass # Try converting underscore to camelCase - parts = value.lower().split('_') - camel_case = parts[0] + ''.join(word.capitalize() for word in parts[1:]) + parts = value.lower().split("_") + camel_case = parts[0] + "".join(word.capitalize() for word in parts[1:]) try: return cls(camel_case) except ValueError: - raise ValueError(f"Invalid switch role: {value}. Valid options: {cls.choices()}") + raise ValueError( + f"Invalid switch role: {value}. Valid options: {cls.choices()}" + ) @classmethod def normalize(cls, value: Union[str, "SwitchRole", None]) -> "SwitchRole": @@ -89,9 +92,9 @@ def normalize(cls, value: Union[str, "SwitchRole", None]) -> "SwitchRole": if role.value.lower() == v_lower: return role # Try converting underscore to camelCase - parts = v_lower.split('_') + parts = v_lower.split("_") if len(parts) > 1: - camel_case = parts[0] + ''.join(word.capitalize() for word in parts[1:]) + camel_case = parts[0] + "".join(word.capitalize() for word in parts[1:]) for role in cls: if role.value == camel_case: return role @@ -104,6 +107,7 @@ class SystemMode(str, Enum): Based on: components/schemas/systemMode """ + NORMAL = "normal" MAINTENANCE = "maintenance" MIGRATION = "migration" @@ -124,6 +128,7 @@ class PlatformType(str, Enum): Includes all platform types supported by the add-switches endpoint. 
Based on: components/schemas """ + NX_OS = "nx-os" OTHER = "other" IOS_XE = "ios-xe" @@ -146,12 +151,13 @@ def normalize(cls, value: Union[str, "PlatformType", None]) -> "PlatformType": if isinstance(value, cls): return value if isinstance(value, str): - v_normalized = value.lower().replace('_', '-') + v_normalized = value.lower().replace("_", "-") for pt in cls: if pt.value == v_normalized: return pt raise ValueError(f"Invalid PlatformType: {value}. Valid: {cls.choices()}") + class ShallowDiscoveryPlatformType(str, Enum): """ Platform type for shallow discovery. @@ -160,6 +166,7 @@ class ShallowDiscoveryPlatformType(str, Enum): Excludes 'apic' which is not supported by the shallowDiscovery endpoint. Based on: components/schemas/shallowDiscoveryRequest.platformType """ + NX_OS = "nx-os" OTHER = "other" IOS_XE = "ios-xe" @@ -171,7 +178,9 @@ def choices(cls) -> List[str]: return [e.value for e in cls] @classmethod - def normalize(cls, value: Union[str, "ShallowDiscoveryPlatformType", None]) -> "ShallowDiscoveryPlatformType": + def normalize( + cls, value: Union[str, "ShallowDiscoveryPlatformType", None] + ) -> "ShallowDiscoveryPlatformType": """ Normalize input to enum value (case-insensitive). Accepts: NX_OS, nx-os, NX-OS, ios_xe, ios-xe, etc. @@ -181,11 +190,13 @@ def normalize(cls, value: Union[str, "ShallowDiscoveryPlatformType", None]) -> " if isinstance(value, cls): return value if isinstance(value, str): - v_normalized = value.lower().replace('_', '-') + v_normalized = value.lower().replace("_", "-") for pt in cls: if pt.value == v_normalized: return pt - raise ValueError(f"Invalid ShallowDiscoveryPlatformType: {value}. Valid: {cls.choices()}") + raise ValueError( + f"Invalid ShallowDiscoveryPlatformType: {value}. 
Valid: {cls.choices()}" + ) class SnmpV3AuthProtocol(str, Enum): @@ -194,6 +205,7 @@ class SnmpV3AuthProtocol(str, Enum): Based on: components/schemas/snmpV3AuthProtocol and schemas-snmpV3AuthProtocol """ + MD5 = "md5" SHA = "sha" MD5_DES = "md5-des" @@ -219,7 +231,9 @@ def choices(cls) -> List[str]: return [e.value for e in cls] @classmethod - def normalize(cls, value: Union[str, "SnmpV3AuthProtocol", None]) -> "SnmpV3AuthProtocol": + def normalize( + cls, value: Union[str, "SnmpV3AuthProtocol", None] + ) -> "SnmpV3AuthProtocol": """ Normalize input to enum value (case-insensitive). Accepts: MD5, md5, MD5_DES, md5-des, etc. @@ -229,7 +243,7 @@ def normalize(cls, value: Union[str, "SnmpV3AuthProtocol", None]) -> "SnmpV3Auth if isinstance(value, cls): return value if isinstance(value, str): - v_normalized = value.lower().replace('_', '-') + v_normalized = value.lower().replace("_", "-") for proto in cls: if proto.value == v_normalized: return proto @@ -242,6 +256,7 @@ class DiscoveryStatus(str, Enum): Based on: components/schemas/additionalSwitchData.discoveryStatus """ + OK = "ok" DISCOVERING = "discovering" REDISCOVERING = "rediscovering" @@ -267,6 +282,7 @@ class ConfigSyncStatus(str, Enum): Based on: components/schemas/switchConfigSyncStatus """ + DEPLOYED = "deployed" DEPLOYMENT_IN_PROGRESS = "deploymentInProgress" FAILED = "failed" @@ -289,6 +305,7 @@ class VpcRole(str, Enum): Based on: components/schemas/schemas-vpcRole """ + PRIMARY = "primary" SECONDARY = "secondary" OPERATIONAL_PRIMARY = "operationalPrimary" @@ -306,6 +323,7 @@ class RemoteCredentialStore(str, Enum): Based on: components/schemas/remoteCredentialStore """ + LOCAL = "local" CYBERARK = "cyberark" @@ -320,6 +338,7 @@ class AnomalyLevel(str, Enum): Based on: components/schemas/anomalyLevel """ + CRITICAL = "critical" MAJOR = "major" MINOR = "minor" @@ -339,6 +358,7 @@ class AdvisoryLevel(str, Enum): Based on: components/schemas/advisoryLevel """ + CRITICAL = "critical" MAJOR = "major" MINOR = 
"minor" diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py index 4425e486..b19206bf 100644 --- a/plugins/module_utils/models/manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -25,7 +25,9 @@ SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) class PreProvisionSwitchModel(NDBaseModel): @@ -36,7 +38,9 @@ class PreProvisionSwitchModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["serial_number"] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] # --- preProvisionSpecific fields (required) --- @@ -196,7 +200,9 @@ class PreProvisionSwitchesRequestModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" switches: List[PreProvisionSwitchModel] = Field( ..., description="PowerOn Auto Provisioning switches", @@ -204,9 +210,7 @@ class PreProvisionSwitchesRequestModel(NDBaseModel): def to_payload(self) -> Dict[str, Any]: """Convert to API payload format.""" - return { - "switches": [s.to_payload() for s in self.switches] - } + return {"switches": [s.to_payload() for s in self.switches]} __all__ = [ diff --git a/plugins/module_utils/models/manage_switches/rma_models.py 
b/plugins/module_utils/models/manage_switches/rma_models.py index 7585d222..e20555ac 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -24,7 +24,10 @@ SnmpV3AuthProtocol, SwitchRole, ) -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) + class RMASwitchModel(NDBaseModel): """ @@ -32,98 +35,61 @@ class RMASwitchModel(NDBaseModel): Path: POST /fabrics/{fabricName}/switches/{switchId}/actions/provisionRMA """ + identifiers: ClassVar[List[str]] = ["new_switch_id"] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] # From bootstrapBase gateway_ip_mask: str = Field( - ..., - alias="gatewayIpMask", - description="Gateway IP address with mask" - ) - model: str = Field( - ..., - description="Model of the bootstrap switch" + ..., alias="gatewayIpMask", description="Gateway IP address with mask" ) + model: str = Field(..., description="Model of the bootstrap switch") software_version: str = Field( ..., alias="softwareVersion", - description="Software version of the bootstrap switch" + description="Software version of the bootstrap switch", ) image_policy: Optional[str] = Field( default=None, alias="imagePolicy", - description="Image policy associated with the switch during bootstrap" - ) - switch_role: Optional[SwitchRole] = Field( - default=None, - alias="switchRole" + description="Image policy associated with the switch during bootstrap", ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") # From bootstrapCredential 
password: str = Field( - ..., - description="Switch password to be set during bootstrap for admin user" + ..., description="Switch password to be set during bootstrap for admin user" ) discovery_auth_protocol: SnmpV3AuthProtocol = Field( - ..., - alias="discoveryAuthProtocol" - ) - discovery_username: Optional[str] = Field( - default=None, - alias="discoveryUsername" - ) - discovery_password: Optional[str] = Field( - default=None, - alias="discoveryPassword" + ..., alias="discoveryAuthProtocol" ) + discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") + discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") remote_credential_store: RemoteCredentialStore = Field( - default=RemoteCredentialStore.LOCAL, - alias="remoteCredentialStore" + default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore" ) remote_credential_store_key: Optional[str] = Field( - default=None, - alias="remoteCredentialStoreKey" + default=None, alias="remoteCredentialStoreKey" ) # From RMASpecific - hostname: str = Field( - ..., - description="Hostname of the switch" - ) - ip: str = Field( - ..., - description="IP address of the switch" - ) + hostname: str = Field(..., description="Hostname of the switch") + ip: str = Field(..., description="IP address of the switch") new_switch_id: str = Field( - ..., - alias="newSwitchId", - description="SwitchId (serial number) of the switch" - ) - public_key: str = Field( - ..., - alias="publicKey", - description="Public Key" - ) - finger_print: str = Field( - ..., - alias="fingerPrint", - description="Fingerprint" - ) - dhcp_bootstrap_ip: Optional[str] = Field( - default=None, - alias="dhcpBootstrapIp" - ) - seed_switch: bool = Field( - default=False, - alias="seedSwitch" + ..., alias="newSwitchId", description="SwitchId (serial number) of the switch" ) + public_key: str = Field(..., alias="publicKey", description="Public Key") + finger_print: str = Field(..., alias="fingerPrint", 
description="Fingerprint") + dhcp_bootstrap_ip: Optional[str] = Field(default=None, alias="dhcpBootstrapIp") + seed_switch: bool = Field(default=False, alias="seedSwitch") data: Optional[Dict[str, Any]] = Field( default=None, - description="Bootstrap configuration data block (gatewayIpMask, models)" + description="Bootstrap configuration data block (gatewayIpMask, models)", ) - @field_validator('gateway_ip_mask', mode='before') + @field_validator("gateway_ip_mask", mode="before") @classmethod def validate_gateway(cls, v: str) -> str: result = SwitchValidators.validate_cidr(v) @@ -131,7 +97,7 @@ def validate_gateway(cls, v: str) -> str: raise ValueError("gateway_ip_mask cannot be empty") return result - @field_validator('hostname', mode='before') + @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: result = SwitchValidators.validate_hostname(v) @@ -139,7 +105,7 @@ def validate_host(cls, v: str) -> str: raise ValueError("hostname cannot be empty") return result - @field_validator('ip', 'dhcp_bootstrap_ip', mode='before') + @field_validator("ip", "dhcp_bootstrap_ip", mode="before") @classmethod def validate_ip(cls, v: Optional[str]) -> Optional[str]: if v is None: @@ -149,7 +115,7 @@ def validate_ip(cls, v: Optional[str]) -> Optional[str]: raise ValueError(f"Invalid IP address: {v}") return result - @field_validator('new_switch_id', mode='before') + @field_validator("new_switch_id", mode="before") @classmethod def validate_serial(cls, v: str) -> str: result = SwitchValidators.validate_serial_number(v) @@ -163,7 +129,7 @@ def use_new_credentials(self) -> bool: """Derive useNewCredentials from discoveryUsername and discoveryPassword.""" return bool(self.discovery_username and self.discovery_password) - @model_validator(mode='after') + @model_validator(mode="after") def validate_rma_credentials(self) -> Self: """Validate RMA credential configuration logic.""" if self.use_new_credentials: diff --git 
a/plugins/module_utils/models/manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py index 8c1d7bb6..e3fe9105 100644 --- a/plugins/module_utils/models/manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -19,7 +19,9 @@ from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import SwitchValidators +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) class SwitchCredentialsRequestModel(NDBaseModel): @@ -29,37 +31,36 @@ class SwitchCredentialsRequestModel(NDBaseModel): Supports local credentials or remote credential store (such as CyberArk). Path: POST /api/v1/manage/credentials/switches """ + identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "singleton" switch_ids: List[str] = Field( ..., alias="switchIds", min_length=1, - description="List of switch serial numbers" + description="List of switch serial numbers", ) switch_username: Optional[str] = Field( - default=None, - alias="switchUsername", - description="Switch username" + default=None, alias="switchUsername", description="Switch username" ) switch_password: Optional[str] = Field( - default=None, - alias="switchPassword", - description="Switch password" + default=None, alias="switchPassword", description="Switch password" ) remote_credential_store_key: Optional[str] = Field( default=None, alias="remoteCredentialStoreKey", - description="Remote credential store key (e.g. CyberArk path)" + description="Remote credential store key (e.g. 
CyberArk path)", ) remote_credential_store_type: Optional[str] = Field( default=None, alias="remoteCredentialStoreType", - description="Remote credential store type (e.g. 'cyberark')" + description="Remote credential store type (e.g. 'cyberark')", ) - @field_validator('switch_ids', mode='before') + @field_validator("switch_ids", mode="before") @classmethod def validate_switch_ids(cls, v: List[str]) -> List[str]: """Validate all switch IDs.""" @@ -74,11 +75,16 @@ def validate_switch_ids(cls, v: List[str]) -> List[str]: raise ValueError("No valid switch IDs provided") return validated - @model_validator(mode='after') + @model_validator(mode="after") def validate_credentials(self) -> Self: """Ensure either local or remote credentials are provided.""" - has_local = self.switch_username is not None and self.switch_password is not None - has_remote = self.remote_credential_store_key is not None and self.remote_credential_store_type is not None + has_local = ( + self.switch_username is not None and self.switch_password is not None + ) + has_remote = ( + self.remote_credential_store_key is not None + and self.remote_credential_store_type is not None + ) if not has_local and not has_remote: raise ValueError( "Either local credentials (switchUsername + switchPassword) " @@ -93,15 +99,14 @@ class ChangeSwitchSerialNumberRequestModel(NDBaseModel): Path: POST /fabrics/{fabricName}/switches/{switchId}/actions/changeSwitchSerialNumber """ + identifiers: ClassVar[List[str]] = ["new_switch_id"] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" - new_switch_id: str = Field( - ..., - alias="newSwitchId", - description="New switchId" - ) + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "single" + new_switch_id: str = Field(..., alias="newSwitchId", description="New switchId") - @field_validator('new_switch_id', mode='before') + @field_validator("new_switch_id", 
mode="before") @classmethod def validate_serial(cls, v: str) -> str: result = SwitchValidators.validate_serial_number(v) diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index 9be8b22d..2bf8de01 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -18,7 +18,9 @@ from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel -from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( + NDNestedModel, +) from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( AdvisoryLevel, @@ -38,29 +40,26 @@ class TelemetryIpCollection(NDNestedModel): """ Inband and out-of-band telemetry IP addresses for a switch. 
""" + identifiers: ClassVar[List[str]] = [] inband_ipv4_address: Optional[str] = Field( - default=None, - alias="inbandIpV4Address", - description="Inband IPv4 address" + default=None, alias="inbandIpV4Address", description="Inband IPv4 address" ) inband_ipv6_address: Optional[str] = Field( - default=None, - alias="inbandIpV6Address", - description="Inband IPv6 address" + default=None, alias="inbandIpV6Address", description="Inband IPv6 address" ) out_of_band_ipv4_address: Optional[str] = Field( default=None, alias="outOfBandIpV4Address", - description="Out of band IPv4 address" + description="Out of band IPv4 address", ) out_of_band_ipv6_address: Optional[str] = Field( default=None, alias="outOfBandIpV6Address", - description="Out of band IPv6 address" + description="Out of band IPv6 address", ) - @field_validator('inband_ipv4_address', 'out_of_band_ipv4_address', mode='before') + @field_validator("inband_ipv4_address", "out_of_band_ipv4_address", mode="before") @classmethod def validate_ipv4(cls, v: Optional[str]) -> Optional[str]: return SwitchValidators.validate_ip_address(v) @@ -70,51 +69,38 @@ class VpcData(NDNestedModel): """ vPC pair configuration and operational status for a switch. 
""" + identifiers: ClassVar[List[str]] = [] vpc_domain: int = Field( - ..., - alias="vpcDomain", - ge=1, - le=1000, - description="vPC domain ID" + ..., alias="vpcDomain", ge=1, le=1000, description="vPC domain ID" ) peer_switch_id: str = Field( - ..., - alias="peerSwitchId", - description="vPC peer switch serial number" + ..., alias="peerSwitchId", description="vPC peer switch serial number" ) consistent_status: Optional[bool] = Field( default=None, alias="consistentStatus", - description="Flag to indicate the vPC status is consistent" + description="Flag to indicate the vPC status is consistent", ) intended_peer_name: Optional[str] = Field( default=None, alias="intendedPeerName", - description="Intended vPC host name for pre-provisioned peer switch" + description="Intended vPC host name for pre-provisioned peer switch", ) keep_alive_status: Optional[str] = Field( - default=None, - alias="keepAliveStatus", - description="vPC peer keep alive status" + default=None, alias="keepAliveStatus", description="vPC peer keep alive status" ) peer_link_status: Optional[str] = Field( - default=None, - alias="peerLinkStatus", - description="vPC peer link status" + default=None, alias="peerLinkStatus", description="vPC peer link status" ) peer_name: Optional[str] = Field( - default=None, - alias="peerName", - description="vPC peer switch name" + default=None, alias="peerName", description="vPC peer switch name" ) vpc_role: Optional[VpcRole] = Field( - default=None, - alias="vpcRole", - description="The vPC role" + default=None, alias="vpcRole", description="The vPC role" ) - @field_validator('peer_switch_id', mode='before') + @field_validator("peer_switch_id", mode="before") @classmethod def validate_peer_serial(cls, v: str) -> str: result = SwitchValidators.validate_serial_number(v) @@ -127,16 +113,13 @@ class SwitchMetadata(NDNestedModel): """ Internal database identifiers associated with a switch record. 
""" + identifiers: ClassVar[List[str]] = [] switch_db_id: Optional[int] = Field( - default=None, - alias="switchDbId", - description="Database Id of the switch" + default=None, alias="switchDbId", description="Database Id of the switch" ) switch_uuid: Optional[str] = Field( - default=None, - alias="switchUuid", - description="Internal unique Id of the switch" + default=None, alias="switchUuid", description="Internal unique Id of the switch" ) @@ -144,161 +127,119 @@ class AdditionalSwitchData(NDNestedModel): """ Platform-specific additional data for NX-OS switches. """ + identifiers: ClassVar[List[str]] = [] usage: Optional[str] = Field( - default="others", - description="The usage of additional data" + default="others", description="The usage of additional data" ) config_sync_status: Optional[ConfigSyncStatus] = Field( - default=None, - alias="configSyncStatus", - description="Configuration sync status" + default=None, alias="configSyncStatus", description="Configuration sync status" ) discovery_status: Optional[DiscoveryStatus] = Field( - default=None, - alias="discoveryStatus", - description="Discovery status" + default=None, alias="discoveryStatus", description="Discovery status" ) domain_name: Optional[str] = Field( - default=None, - alias="domainName", - description="Domain name" + default=None, alias="domainName", description="Domain name" ) smart_switch: Optional[bool] = Field( default=None, alias="smartSwitch", - description="Flag that indicates if the switch is equipped with DPUs or not" + description="Flag that indicates if the switch is equipped with DPUs or not", ) hypershield_connectivity_status: Optional[str] = Field( default=None, alias="hypershieldConnectivityStatus", - description="Smart switch connectivity status to hypershield controller" + description="Smart switch connectivity status to hypershield controller", ) hypershield_tenant: Optional[str] = Field( - default=None, - alias="hypershieldTenant", - description="Hypershield tenant name" + 
default=None, alias="hypershieldTenant", description="Hypershield tenant name" ) hypershield_integration_name: Optional[str] = Field( default=None, alias="hypershieldIntegrationName", - description="Hypershield Integration Id" + description="Hypershield Integration Id", ) source_interface_name: Optional[str] = Field( default=None, alias="sourceInterfaceName", - description="Source interface for switch discovery" + description="Source interface for switch discovery", ) source_vrf_name: Optional[str] = Field( default=None, alias="sourceVrfName", - description="Source VRF for switch discovery" + description="Source VRF for switch discovery", ) platform_type: Optional[PlatformType] = Field( - default=None, - alias="platformType", - description="Platform type of the switch" + default=None, alias="platformType", description="Platform type of the switch" ) discovered_system_mode: Optional[SystemMode] = Field( - default=None, - alias="discoveredSystemMode", - description="Discovered system mode" + default=None, alias="discoveredSystemMode", description="Discovered system mode" ) intended_system_mode: Optional[SystemMode] = Field( - default=None, - alias="intendedSystemMode", - description="Intended system mode" + default=None, alias="intendedSystemMode", description="Intended system mode" ) scalable_unit: Optional[str] = Field( - default=None, - alias="scalableUnit", - description="Name of the scalable unit" + default=None, alias="scalableUnit", description="Name of the scalable unit" ) system_mode: Optional[SystemMode] = Field( - default=None, - alias="systemMode", - description="System mode" - ) - vendor: Optional[str] = Field( - default=None, - description="Vendor of the switch" - ) - username: Optional[str] = Field( - default=None, - description="Discovery user name" + default=None, alias="systemMode", description="System mode" ) + vendor: Optional[str] = Field(default=None, description="Vendor of the switch") + username: Optional[str] = Field(default=None, 
description="Discovery user name") remote_credential_store: Optional[RemoteCredentialStore] = Field( - default=None, - alias="remoteCredentialStore" - ) - meta: Optional[SwitchMetadata] = Field( - default=None, - description="Switch metadata" + default=None, alias="remoteCredentialStore" ) + meta: Optional[SwitchMetadata] = Field(default=None, description="Switch metadata") class AdditionalAciSwitchData(NDNestedModel): """ Platform-specific additional data for ACI leaf and spine switches. """ + identifiers: ClassVar[List[str]] = [] usage: Optional[str] = Field( - default="aci", - description="The usage of additional data" + default="aci", description="The usage of additional data" ) admin_status: Optional[Literal["inService", "outOfService"]] = Field( - default=None, - alias="adminStatus", - description="Admin status" + default=None, alias="adminStatus", description="Admin status" ) health_score: Optional[int] = Field( default=None, alias="healthScore", ge=1, le=100, - description="Switch health score" + description="Switch health score", ) last_reload_time: Optional[str] = Field( default=None, alias="lastReloadTime", - description="Timestamp when the system is last reloaded" + description="Timestamp when the system is last reloaded", ) last_software_update_time: Optional[str] = Field( default=None, alias="lastSoftwareUpdateTime", - description="Timestamp when the software is last updated" + description="Timestamp when the software is last updated", ) node_id: Optional[int] = Field( - default=None, - alias="nodeId", - ge=1, - description="Node ID" + default=None, alias="nodeId", ge=1, description="Node ID" ) node_status: Optional[Literal["active", "inActive"]] = Field( - default=None, - alias="nodeStatus", - description="Node status" + default=None, alias="nodeStatus", description="Node status" ) pod_id: Optional[int] = Field( - default=None, - alias="podId", - ge=1, - description="Pod ID" + default=None, alias="podId", ge=1, description="Pod ID" ) 
remote_leaf_group_name: Optional[str] = Field( - default=None, - alias="remoteLeafGroupName", - description="Remote leaf group name" + default=None, alias="remoteLeafGroupName", description="Remote leaf group name" ) switch_added: Optional[str] = Field( default=None, alias="switchAdded", - description="Timestamp when the switch is added" + description="Timestamp when the switch is added", ) tep_pool: Optional[str] = Field( - default=None, - alias="tepPool", - description="TEP IP pool" + default=None, alias="tepPool", description="TEP IP pool" ) @@ -306,11 +247,11 @@ class Metadata(NDNestedModel): """ Pagination and result-count metadata from a list API response. """ + identifiers: ClassVar[List[str]] = [] counts: Optional[Dict[str, int]] = Field( - default=None, - description="Count information including total and remaining" + default=None, description="Count information including total and remaining" ) @@ -320,88 +261,64 @@ class SwitchDataModel(NDBaseModel): Path: GET /fabrics/{fabricName}/switches """ + identifiers: ClassVar[List[str]] = ["switch_id"] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + identifier_strategy: ClassVar[ + Optional[Literal["single", "composite", "hierarchical", "singleton"]] + ] = "single" switch_id: str = Field( ..., alias="switchId", - description="Serial number of Switch or Node Id of ACI switch" + description="Serial number of Switch or Node Id of ACI switch", ) serial_number: Optional[str] = Field( default=None, alias="serialNumber", - description="Serial number of switch or APIC controller node" - ) - additional_data: Optional[Union[AdditionalSwitchData, AdditionalAciSwitchData]] = Field( - default=None, - alias="additionalData", - description="Additional switch data" + description="Serial number of switch or APIC controller node", ) - advisory_level: Optional[AdvisoryLevel] = Field( - default=None, - alias="advisoryLevel" - ) - anomaly_level: 
Optional[AnomalyLevel] = Field( - default=None, - alias="anomalyLevel" - ) - alert_suspend: Optional[str] = Field( - default=None, - alias="alertSuspend" + additional_data: Optional[Union[AdditionalSwitchData, AdditionalAciSwitchData]] = ( + Field( + default=None, alias="additionalData", description="Additional switch data" + ) ) + advisory_level: Optional[AdvisoryLevel] = Field(default=None, alias="advisoryLevel") + anomaly_level: Optional[AnomalyLevel] = Field(default=None, alias="anomalyLevel") + alert_suspend: Optional[str] = Field(default=None, alias="alertSuspend") fabric_management_ip: Optional[str] = Field( default=None, alias="fabricManagementIp", - description="Switch IPv4/v6 address used for management" + description="Switch IPv4/v6 address used for management", ) fabric_name: Optional[str] = Field( - default=None, - alias="fabricName", - description="Fabric name", - max_length=64 + default=None, alias="fabricName", description="Fabric name", max_length=64 ) fabric_type: Optional[str] = Field( - default=None, - alias="fabricType", - description="Fabric type" - ) - hostname: Optional[str] = Field( - default=None, - description="Switch host name" + default=None, alias="fabricType", description="Fabric type" ) + hostname: Optional[str] = Field(default=None, description="Switch host name") model: Optional[str] = Field( - default=None, - description="Model of switch or APIC controller node" + default=None, description="Model of switch or APIC controller node" ) software_version: Optional[str] = Field( default=None, alias="softwareVersion", - description="Software version of switch or APIC controller node" - ) - switch_role: Optional[SwitchRole] = Field( - default=None, - alias="switchRole" + description="Software version of switch or APIC controller node", ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") system_up_time: Optional[str] = Field( - default=None, - alias="systemUpTime", - description="System up time" + default=None, 
alias="systemUpTime", description="System up time" ) vpc_configured: Optional[bool] = Field( default=None, alias="vpcConfigured", - description="Flag to indicate switch is part of a vPC domain" - ) - vpc_data: Optional[VpcData] = Field( - default=None, - alias="vpcData" + description="Flag to indicate switch is part of a vPC domain", ) + vpc_data: Optional[VpcData] = Field(default=None, alias="vpcData") telemetry_ip_collection: Optional[TelemetryIpCollection] = Field( - default=None, - alias="telemetryIpCollection" + default=None, alias="telemetryIpCollection" ) - @field_validator('additional_data', mode='before') + @field_validator("additional_data", mode="before") @classmethod def parse_additional_data(cls, v: Any) -> Any: """Route additionalData to the correct nested model. @@ -412,11 +329,11 @@ def parse_additional_data(cls, v: Any) -> Any: """ if v is None or not isinstance(v, dict): return v - if 'usage' not in v: - v = {**v, 'usage': 'others'} + if "usage" not in v: + v = {**v, "usage": "others"} return v - @field_validator('switch_id', mode='before') + @field_validator("switch_id", mode="before") @classmethod def validate_switch_id(cls, v: str) -> str: result = SwitchValidators.validate_serial_number(v) @@ -424,7 +341,7 @@ def validate_switch_id(cls, v: str) -> str: raise ValueError("switch_id cannot be empty") return result - @field_validator('fabric_management_ip', mode='before') + @field_validator("fabric_management_ip", mode="before") @classmethod def validate_mgmt_ip(cls, v: Optional[str]) -> Optional[str]: return SwitchValidators.validate_ip_address(v) diff --git a/plugins/module_utils/models/manage_switches/validators.py b/plugins/module_utils/models/manage_switches/validators.py index b2e3a704..3dcdd3a6 100644 --- a/plugins/module_utils/models/manage_switches/validators.py +++ b/plugins/module_utils/models/manage_switches/validators.py @@ -42,7 +42,7 @@ def validate_cidr(v: Optional[str]) -> Optional[str]: v = str(v).strip() if not v: return None - 
if '/' not in v: + if "/" not in v: raise ValueError(f"CIDR notation required (IP/mask format): {v}") try: ip_network(v, strict=False) @@ -59,7 +59,7 @@ def validate_serial_number(v: Optional[str]) -> Optional[str]: if not v: return None # Serial numbers are typically alphanumeric with optional hyphens - if not re.match(r'^[A-Za-z0-9_-]+$', v): + if not re.match(r"^[A-Za-z0-9_-]+$", v): raise ValueError( f"Serial number must be alphanumeric with optional hyphens/underscores: {v}" ) @@ -77,12 +77,12 @@ def validate_hostname(v: Optional[str]) -> Optional[str]: if len(v) > 255: raise ValueError("Hostname cannot exceed 255 characters") # Allow alphanumeric, dots, hyphens, underscores - if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9._-]*$', v): + if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9._-]*$", v): raise ValueError( f"Invalid hostname format. Must start with alphanumeric and " f"contain only alphanumeric, dots, hyphens, underscores: {v}" ) - if v.startswith('.') or v.endswith('.') or '..' in v: + if v.startswith(".") or v.endswith(".") or ".." 
in v: raise ValueError(f"Invalid hostname format (dots): {v}") return v @@ -95,7 +95,7 @@ def validate_mac_address(v: Optional[str]) -> Optional[str]: if not v: return None # Accept colon or hyphen separated MAC addresses - mac_pattern = r'^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$' + mac_pattern = r"^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$" if not re.match(mac_pattern, v): raise ValueError(f"Invalid MAC address format: {v}") return v diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 877ac868..0ef11d1d 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -426,8 +426,12 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import SwitchConfigModel -from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.nd_switch_resources import NDSwitchResourceModule +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( + SwitchConfigModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.nd_switch_resources import ( + NDSwitchResourceModule, +) from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import ( NDModule, NDModuleError, @@ -456,7 +460,6 @@ def main(): # Initialize logging try: log_config = Log() - log_config.config = "/Users/achengam/Documents/Ansible_Dev/NDBranch/ansible_collections/cisco/nd/ansible_cisco_log_r.json" log_config.commit() # Create logger instance for this module log = logging.getLogger("nd.nd_manage_switches") @@ -476,11 +479,7 @@ def main(): nd = NDModule(module) # Create NDSwitchResourceModule - sw_module = NDSwitchResourceModule( - nd=nd, - results=results, - logger=log - ) + sw_module = NDSwitchResourceModule(nd=nd, results=results, logger=log) # Manage state for merged, overridden, deleted 
sw_module.manage_state() @@ -541,6 +540,7 @@ def main(): if output_level == "debug": import traceback + results.final_result["traceback"] = traceback.format_exc() module.fail_json(msg=str(error), **results.final_result) From 00298ccd255283d47610a9c7798dcdfd603ab7bf Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 30 Mar 2026 12:01:47 +0530 Subject: [PATCH 076/109] Import Fixes + ArgSpec Changes --- plugins/action/nd_switches_validate.py | 13 ++- .../module_utils/common/pydantic_compat.py | 15 ++- .../manage_switches/nd_switch_resources.py | 4 +- .../manage_switches/bootstrap_models.py | 7 +- .../models/manage_switches/config_models.py | 104 +++++++++++++++++- .../manage_switches/discovery_models.py | 5 +- .../manage_switches/preprovision_models.py | 6 +- .../models/manage_switches/rma_models.py | 7 +- .../manage_switches/switch_actions_models.py | 6 +- .../manage_switches/switch_data_models.py | 5 +- plugins/modules/nd_manage_switches.py | 5 + 11 files changed, 161 insertions(+), 16 deletions(-) diff --git a/plugins/action/nd_switches_validate.py b/plugins/action/nd_switches_validate.py index ed0c4b47..648e1288 100644 --- a/plugins/action/nd_switches_validate.py +++ b/plugins/action/nd_switches_validate.py @@ -26,12 +26,13 @@ from ansible.plugins.action import ActionBase from ansible.utils.display import Display - -try: - from pydantic import BaseModel, ValidationError, field_validator, model_validator - HAS_PYDANTIC = True -except ImportError: - HAS_PYDANTIC = False +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + HAS_PYDANTIC, + ValidationError, + field_validator, + model_validator, +) try: from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import SwitchConfigModel diff --git a/plugins/module_utils/common/pydantic_compat.py b/plugins/module_utils/common/pydantic_compat.py index b26559d2..aeaa1a56 100644 --- a/plugins/module_utils/common/pydantic_compat.py +++ 
b/plugins/module_utils/common/pydantic_compat.py @@ -48,6 +48,7 @@ StrictBool, SecretStr, ValidationError, + ValidationInfo, field_serializer, model_serializer, field_validator, @@ -73,6 +74,7 @@ StrictBool, SecretStr, ValidationError, + ValidationInfo, field_serializer, model_serializer, field_validator, @@ -117,8 +119,10 @@ def ConfigDict(**kwargs) -> dict: # pylint: disable=unused-argument,invalid-nam return kwargs # Fallback: Field that does nothing - def Field(**kwargs) -> Any: # pylint: disable=unused-argument,invalid-name + def Field(*args, **kwargs) -> Any: # pylint: disable=unused-argument,invalid-name """Pydantic Field fallback when pydantic is not available.""" + if args: + return args[0] if "default_factory" in kwargs: return kwargs["default_factory"]() return kwargs.get("default") @@ -191,6 +195,14 @@ def __init__(self, message="A custom error occurred."): def __str__(self): return f"ValidationError: {self.message}" + # Fallback: ValidationInfo placeholder class that does nothing + class ValidationInfo: + """Pydantic ValidationInfo fallback when pydantic is not available.""" + + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + # Fallback: model_validator decorator that does nothing def model_validator(*args, **kwargs): # pylint: disable=unused-argument """Pydantic model_validator fallback when pydantic is not available.""" @@ -276,6 +288,7 @@ def main(): "StrictBool", "SecretStr", "ValidationError", + "ValidationInfo", "field_serializer", "model_serializer", "field_validator", diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index cd067828..69935cd4 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -19,7 +19,9 @@ from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union -from pydantic import 
ValidationError +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + ValidationError, +) from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index f259cc6c..d306dd17 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -13,10 +13,15 @@ __metaclass__ = type -from pydantic import Field, computed_field, field_validator, model_validator from typing import Any, Dict, List, Optional, ClassVar, Literal from typing_extensions import Self +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + computed_field, + field_validator, + model_validator, +) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( NDNestedModel, diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 0f612b39..e4fe3029 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -17,7 +17,7 @@ import socket from ipaddress import ip_address -from pydantic import ( +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, ValidationInfo, computed_field, @@ -694,7 +694,107 @@ def get_argument_spec(cls) -> Dict[str, Any]: ), save=dict(type="bool", default=True), deploy=dict(type="bool", default=True), - config=dict(type="list", elements="dict"), + config=dict( + type="list", + elements="dict", + options=dict( + seed_ip=dict(type="str", required=True), + 
username=dict(type="str", default="admin"), + password=dict(type="str", no_log=True), + auth_proto=dict( + type="str", + default="MD5", + choices=[ + "MD5", + "SHA", + "MD5_DES", + "MD5_AES", + "SHA_DES", + "SHA_AES", + ], + ), + role=dict( + type="str", + default="leaf", + choices=[ + "leaf", + "spine", + "border", + "border_spine", + "border_gateway", + "border_gateway_spine", + "super_spine", + "border_super_spine", + "border_gateway_super_spine", + "access", + "aggregation", + "edge_router", + "core_router", + "tor", + ], + ), + preserve_config=dict(type="bool", default=False), + platform_type=dict( + type="str", + default="nx-os", + choices=["nx-os", "ios-xe"], + ), + poap=dict( + type="dict", + options=dict( + serial_number=dict(type="str", required=True), + hostname=dict(type="str", required=True), + discovery_username=dict(type="str"), + discovery_password=dict(type="str", no_log=True), + image_policy=dict(type="str"), + ), + ), + preprovision=dict( + type="dict", + options=dict( + serial_number=dict(type="str", required=True), + model=dict(type="str", required=True), + version=dict(type="str", required=True), + hostname=dict(type="str", required=True), + discovery_username=dict(type="str"), + discovery_password=dict(type="str", no_log=True), + image_policy=dict(type="str"), + config_data=dict( + type="dict", + required=True, + options=dict( + models=dict( + type="list", + elements="str", + required=True, + ), + gateway=dict(type="str", required=True), + ), + ), + ), + ), + rma=dict( + type="list", + elements="dict", + options=dict( + new_serial_number=dict(type="str", required=True), + old_serial_number=dict(type="str", required=True), + discovery_username=dict(type="str"), + discovery_password=dict(type="str", no_log=True), + model=dict(type="str"), + version=dict(type="str"), + image_policy=dict(type="str"), + config_data=dict( + type="dict", + options=dict( + models=dict(type="list", elements="str"), + gateway=dict(type="str"), + ), + ), + ), + ), + ), 
+ ), ) diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py index 21188515..fc42c8c7 100644 --- a/plugins/module_utils/models/manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -13,10 +13,13 @@ __metaclass__ = type -from pydantic import Field, field_validator from typing import Any, Dict, List, Optional, ClassVar, Literal, Union from typing_extensions import Self +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + field_validator, +) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py index b19206bf..96e38588 100644 --- a/plugins/module_utils/models/manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -14,10 +14,14 @@ __metaclass__ = type from ipaddress import ip_network -from pydantic import Field, computed_field, field_validator from typing import Any, Dict, List, Optional, ClassVar, Literal from typing_extensions import Self +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + computed_field, + field_validator, +) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py index e20555ac..d1f253e2 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -13,10 +13,15 @@ 
__metaclass__ = type -from pydantic import Field, computed_field, field_validator, model_validator from typing import Any, Dict, List, Optional, ClassVar, Literal from typing_extensions import Self +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + computed_field, + field_validator, + model_validator, +) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( diff --git a/plugins/module_utils/models/manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py index e3fe9105..e0384dd6 100644 --- a/plugins/module_utils/models/manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -13,10 +13,14 @@ __metaclass__ = type -from pydantic import Field, field_validator, model_validator from typing import Any, Dict, List, Literal, Optional, ClassVar from typing_extensions import Self +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + field_validator, + model_validator, +) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index 2bf8de01..230c9215 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -13,10 +13,13 @@ __metaclass__ = type -from pydantic import Field, field_validator from typing import Any, Dict, List, Optional, ClassVar, Literal, Union from typing_extensions import Self +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + 
Field, + field_validator, +) from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( NDNestedModel, diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 0ef11d1d..6778fd85 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -425,6 +425,9 @@ import logging from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + require_pydantic, +) from ansible_collections.cisco.nd.plugins.module_utils.common.log import Log from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( SwitchConfigModel, @@ -457,6 +460,8 @@ def main(): ], ) + require_pydantic(module) + # Initialize logging try: log_config = Log() From 87fb5a29f10e12248fd10557d0f419840a2d7f24 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 30 Mar 2026 13:26:15 +0530 Subject: [PATCH 077/109] Sanity Fixes V1 --- plugins/action/nd_switches_validate.py | 1 - .../v1/manage/manage_fabrics_bootstrap.py | 2 +- .../module_utils/manage_switches/__init__.py | 34 ----- .../manage_switches/nd_switch_resources.py | 143 ++++++++++------- plugins/module_utils/manage_switches/utils.py | 80 +++++----- plugins/module_utils/models/__init__.py | 1 - .../models/manage_switches/__init__.py | 144 ------------------ .../manage_switches/bootstrap_models.py | 5 +- .../models/manage_switches/config_models.py | 15 +- .../manage_switches/discovery_models.py | 1 - .../manage_switches/preprovision_models.py | 3 +- .../models/manage_switches/rma_models.py | 5 +- .../manage_switches/switch_actions_models.py | 5 +- .../manage_switches/switch_data_models.py | 3 +- plugins/module_utils/utils.py | 29 ++-- plugins/modules/nd_manage_switches.py | 26 ++-- .../nd_manage_switches/tests/nd/deleted.yaml | 1 - tests/sanity/ignore-2.16.txt | 1 + 
.../test_endpoints_api_v1_manage_fabrics.py | 4 +- ...endpoints_api_v1_manage_fabrics_actions.py | 4 +- ...dpoints_api_v1_manage_fabrics_bootstrap.py | 2 +- ...dpoints_api_v1_manage_fabrics_inventory.py | 2 +- ...nts_api_v1_manage_fabrics_switchactions.py | 10 +- ...ndpoints_api_v1_manage_fabrics_switches.py | 13 +- 24 files changed, 199 insertions(+), 335 deletions(-) create mode 100644 tests/sanity/ignore-2.16.txt diff --git a/plugins/action/nd_switches_validate.py b/plugins/action/nd_switches_validate.py index 648e1288..becbe870 100644 --- a/plugins/action/nd_switches_validate.py +++ b/plugins/action/nd_switches_validate.py @@ -263,4 +263,3 @@ def run(self, tmp=None, task_vars=None): results["msg"] = "Validation Failed! Please check output above." return results - diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py index d7231abd..28f5c761 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py @@ -20,7 +20,7 @@ __author__ = "Akshayanat C S" # pylint: enable=invalid-name -from typing import Literal, Optional +from typing import Literal from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( diff --git a/plugins/module_utils/manage_switches/__init__.py b/plugins/module_utils/manage_switches/__init__.py index aa6dfd90..e69de29b 100644 --- a/plugins/module_utils/manage_switches/__init__.py +++ b/plugins/module_utils/manage_switches/__init__.py @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""nd_manage_switches package. 
- -Re-exports the orchestrator and utility classes so that consumers can -import directly from the package. -""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.nd_switch_resources import ( # noqa: F401 - NDSwitchResourceModule, -) -from ansible_collections.cisco.nd.plugins.module_utils.utils import ( # noqa: F401 - SwitchOperationError, - FabricUtils, -) -from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.utils import ( # noqa: F401 - PayloadUtils, - SwitchWaitUtils, - mask_password, - get_switch_field, - determine_operation_type, - group_switches_by_credentials, - query_bootstrap_switches, - build_bootstrap_index, - build_poap_data_block, -) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 69935cd4..e1523276 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -15,7 +15,6 @@ __metaclass__ = type import logging -from copy import deepcopy from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union @@ -30,25 +29,39 @@ ) from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches import ( +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( SwitchRole, SnmpV3AuthProtocol, PlatformType, DiscoveryStatus, SystemMode, ConfigSyncStatus, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.discovery_models import ( SwitchDiscoveryModel, - SwitchDataModel, AddSwitchesRequestModel, ShallowDiscoveryRequestModel, +) +from 
ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( + SwitchDataModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.bootstrap_models import ( BootstrapImportSwitchModel, ImportBootstrapSwitchesRequestModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.preprovision_models import ( PreProvisionSwitchModel, PreProvisionSwitchesRequestModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.rma_models import ( RMASwitchModel, - SwitchConfigModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_actions_models import ( SwitchCredentialsRequestModel, ChangeSwitchSerialNumberRequestModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( + SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel, RMAConfigModel, @@ -313,13 +326,17 @@ def compute_changes( if prop_dict.get(k) != existing_dict.get(k) } log.info( - f"Switch {ip} has differences — marking to_update. " - f"Changed fields: {diff_keys}" + "Switch %s has differences — marking to_update. 
Changed fields: %s", + ip, + diff_keys, ) + proposed_diff = {k: prop_dict.get(k) for k in diff_keys} + existing_diff = {k: existing_dict.get(k) for k in diff_keys} log.debug( - f"Switch {ip} diff detail — " - f"proposed: { {k: prop_dict.get(k) for k in diff_keys} }, " - f"existing: { {k: existing_dict.get(k) for k in diff_keys} }" + "Switch %s diff detail — proposed: %s, existing: %s", + ip, + proposed_diff, + existing_diff, ) changes["to_update"].append(prop_sw) @@ -477,7 +494,7 @@ def discover( log.debug("Step 2: Bulk discovering switches") all_discovered: Dict[str, Dict[str, Any]] = {} for group_key, switches in credential_groups.items(): - username, _, auth_proto, platform_type, _ = group_key + username, _pw_hash, auth_proto, platform_type, _preserve = group_key password = switches[0].password log.debug( @@ -793,7 +810,7 @@ def bulk_add( ) payload = add_request.to_payload() - serial_numbers = [d.get("serialNumber") for _, d in switches] + serial_numbers = [d.get("serialNumber") for _cfg, d in switches] log.info( f"Bulk adding {len(switches)} switches to fabric " f"{self.ctx.fabric}: {', '.join(serial_numbers)}" @@ -1086,7 +1103,7 @@ def post_add_processing( """ nd = self.ctx.nd log = self.ctx.log - all_serials = [sn for sn, _ in switch_actions] + all_serials = [sn for sn, _cfg in switch_actions] log.info( f"Waiting for {len(all_serials)} {context} " @@ -1253,9 +1270,9 @@ def handle( results.response_current = {"MESSAGE": "check mode — skipped"} results.result_current = {"success": True, "changed": False} results.diff_current = { - "bootstrap": [cfg.seed_ip for cfg, _ in bootstrap_entries], - "preprovision": [cfg.seed_ip for cfg, _ in preprov_entries], - "swap": [cfg.seed_ip for cfg, _ in swap_entries], + "bootstrap": [cfg.seed_ip for cfg, _sw in bootstrap_entries], + "preprovision": [cfg.seed_ip for cfg, _sw in preprov_entries], + "swap": [cfg.seed_ip for cfg, _sw in swap_entries], } results.register_api_call() return @@ -1887,7 +1904,7 @@ def 
_handle_poap_swap( log.info( f"POAP swap completed successfully for {len(swap_entries)} " - f"switch(es): {[sn for sn, _ in switch_actions]}" + f"switch(es): {[sn for sn, _cfg in switch_actions]}" ) log.debug("EXIT: _handle_poap_swap()") @@ -2041,7 +2058,7 @@ def handle( # RMA-specific wait (unreachable → ok) instead of the generic # wait_for_switch_manageable which would time out on the # migration-mode phase. - all_new_serials = [sn for sn, _ in switch_actions] + all_new_serials = [sn for sn, _cfg in switch_actions] log.info( f"Waiting for {len(all_new_serials)} RMA replacement " f"switch(es) to become ready: {all_new_serials}" @@ -2399,7 +2416,7 @@ def __init__( self.poap_handler = POAPHandler(self.ctx, self.fabric_ops, self.wait_utils) self.rma_handler = RMAHandler(self.ctx, self.fabric_ops, self.wait_utils) - log.info(f"Initialized NDSwitchResourceModule for fabric: {self.fabric}") + log.info("Initialized NDSwitchResourceModule for fabric: %s", self.fabric) def exit_json(self) -> None: """Finalize collected results and exit the Ansible module. @@ -2457,7 +2474,7 @@ def manage_state(self) -> None: Returns: None. 
""" - self.log.info(f"Managing state: {self.state}") + self.log.info("Managing state: %s", self.state) # gathered — read-only, no config accepted if self.state == "gathered": @@ -2504,8 +2521,10 @@ def manage_state(self) -> None: self.output.assign(proposed=output_proposed) self.log.info( - f"Config partition: {len(normal_configs)} normal, " - f"{len(poap_configs)} poap, {len(rma_configs)} rma" + "Config partition: %s normal, %s poap, %s rma", + len(normal_configs), + len(poap_configs), + len(rma_configs), ) # POAP and RMA are only valid with state=merged @@ -2523,8 +2542,10 @@ def manage_state(self) -> None: ] if configs_to_discover: self.log.info( - f"Discovery needed for {len(configs_to_discover)}/{len(normal_configs)} " - f"switch(es) — {len(normal_configs) - len(configs_to_discover)} already in fabric" + "Discovery needed for %s/%s switch(es) — %s already in fabric", + len(configs_to_discover), + len(normal_configs), + len(normal_configs) - len(configs_to_discover), ) discovered_data = self.discovery.discover(configs_to_discover) else: @@ -2577,8 +2598,8 @@ def _handle_merged_state( """ self.log.debug("ENTER: _handle_merged_state()") self.log.info("Handling merged state") - self.log.debug(f"Proposed configs: {len(self.proposed)}") - self.log.debug(f"Existing switches: {len(self.existing)}") + self.log.debug("Proposed configs: %s", len(self.proposed)) + self.log.debug("Existing switches: %s", len(self.existing)) if not self.proposed: self.log.info("No configurations provided for merged state") @@ -2604,9 +2625,10 @@ def _handle_merged_state( # Check mode — preview only if self.nd.module.check_mode: self.log.info( - f"Check mode: would add {len(switches_to_add)}, " - f"process {len(migration_switches)} migration switch(es), " - f"save_deploy_required={idempotent_save_req}" + "Check mode: would add %s, process %s migration switch(es), save_deploy_required=%s", + len(switches_to_add), + len(migration_switches), + idempotent_save_req, ) self.results.action = 
"merge" self.results.state = self.state @@ -2639,7 +2661,8 @@ def _handle_merged_state( add_configs.append(cfg) else: self.log.warning( - f"No config found for switch {sw.fabric_management_ip}, skipping add" + "No config found for switch %s, skipping add", + sw.fabric_management_ip, ) if add_configs: @@ -2661,7 +2684,8 @@ def _handle_merged_state( pairs.append((cfg, disc)) else: self.log.warning( - f"No discovery data for {cfg.seed_ip}, skipping" + "No discovery data for %s, skipping", + cfg.seed_ip, ) if not pairs: @@ -2675,7 +2699,7 @@ def _handle_merged_state( platform_type=platform_type, preserve_config=preserve_config, ) - _bulk_added_ips.update(cfg.seed_ip for cfg, _ in pairs) + _bulk_added_ips.update(cfg.seed_ip for cfg, _disc in pairs) for cfg, disc in pairs: sn = disc.get("serialNumber") @@ -2712,7 +2736,7 @@ def _handle_merged_state( # preserve_config=True the switches will NOT reload after being # added to the fabric. Passing this flag lets the wait utility # skip the unreachable/reload detection phases. 
- all_preserve_config = all(cfg.preserve_config for _, cfg in switch_actions) + all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) if all_preserve_config: self.log.info( "All switches in batch are brownfield (preserve_config=True) — " @@ -2829,9 +2853,11 @@ def _handle_overridden_state( n_add = len(diff.get("to_add", [])) n_migrate = len(diff.get("migration_mode", [])) self.log.info( - f"Check mode: would delete {n_delete}, " - f"delete-and-re-add {n_update}, " - f"add {n_add}, migrate {n_migrate}" + "Check mode: would delete %s, delete-and-re-add %s, add %s, migrate %s", + n_delete, + n_update, + n_add, + n_migrate, ) self.results.action = "override" self.results.state = self.state @@ -2855,8 +2881,9 @@ def _handle_overridden_state( # Phase 1: Switches not in proposed config for sw in diff.get("to_delete", []): self.log.info( - f"Marking for deletion (not in proposed): " - f"{sw.fabric_management_ip} ({sw.switch_id})" + "Marking for deletion (not in proposed): %s (%s)", + sw.fabric_management_ip, + sw.switch_id, ) switches_to_delete.append(sw) self._log_operation("delete", sw.fabric_management_ip) @@ -2874,8 +2901,9 @@ def _handle_overridden_state( ) if existing_sw: self.log.info( - f"Marking for deletion (re-add update): " - f"{existing_sw.fabric_management_ip} ({existing_sw.switch_id})" + "Marking for deletion (re-add update): %s (%s)", + existing_sw.fabric_management_ip, + existing_sw.switch_id, ) switches_to_delete.append(existing_sw) self._log_operation( @@ -2905,9 +2933,9 @@ def _handle_overridden_state( ] if configs_needing_rediscovery: self.log.info( - f"Re-discovering {len(configs_needing_rediscovery)} switch(es) " - f"after deletion for re-add: " - f"{[cfg.seed_ip for cfg in configs_needing_rediscovery]}" + "Re-discovering %s switch(es) after deletion for re-add: %s", + len(configs_needing_rediscovery), + [cfg.seed_ip for cfg in configs_needing_rediscovery], ) fresh_discovered = 
self.discovery.discover(configs_needing_rediscovery) discovered_data = {**(discovered_data or {}), **fresh_discovered} @@ -2927,10 +2955,10 @@ def _handle_gathered_state(self) -> None: None. """ self.log.debug("ENTER: _handle_gathered_state()") - self.log.info(f"Gathering inventory for fabric '{self.fabric}'") + self.log.info("Gathering inventory for fabric '%s'", self.fabric) if not self.existing: - self.log.info(f"Fabric '{self.fabric}' has no switches in inventory") + self.log.info("Fabric '%s' has no switches in inventory", self.fabric) self.results.action = "gathered" self.results.state = self.state @@ -2941,7 +2969,9 @@ def _handle_gathered_state(self) -> None: self.results.register_api_call() self.log.info( - f"Gathered {len(list(self.existing))} switch(es) from fabric '{self.fabric}'" + "Gathered %s switch(es) from fabric '%s'", + len(list(self.existing)), + self.fabric, ) self.log.debug("EXIT: _handle_gathered_state()") @@ -2985,13 +3015,17 @@ def _handle_deleted_state( ) if existing_switch: self.log.info( - f"Marking for deletion: {identifier} ({existing_switch.switch_id})" + "Marking for deletion: %s (%s)", + identifier, + existing_switch.switch_id, ) switches_to_delete.append(existing_switch) else: - self.log.info(f"Switch not found for deletion: {identifier}") + self.log.info("Switch not found for deletion: %s", identifier) - self.log.info(f"Total switches marked for deletion: {len(switches_to_delete)}") + self.log.info( + "Total switches marked for deletion: %s", len(switches_to_delete) + ) if not switches_to_delete: self.log.info("No switches to delete") return @@ -2999,7 +3033,7 @@ def _handle_deleted_state( # Check mode — preview only if self.nd.module.check_mode: self.log.info( - f"Check mode: would delete {len(switches_to_delete)} switch(es)" + "Check mode: would delete %s switch(es)", len(switches_to_delete) ) self.results.action = "delete" self.results.state = self.state @@ -3016,7 +3050,8 @@ def _handle_deleted_state( return self.log.info( - 
f"Proceeding to delete {len(switches_to_delete)} switch(es) from fabric" + "Proceeding to delete %s switch(es) from fabric", + len(switches_to_delete), ) self.fabric_ops.bulk_delete(switches_to_delete) for sw in switches_to_delete: @@ -3035,8 +3070,8 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: """ endpoint = EpManageFabricsSwitchesGet() endpoint.fabric_name = self.fabric - self.log.debug(f"Querying all switches with endpoint: {endpoint.path}") - self.log.debug(f"Query verb: {endpoint.verb}") + self.log.debug("Querying all switches with endpoint: %s", endpoint.path) + self.log.debug("Query verb: %s", endpoint.verb) try: result = self.nd.request(path=endpoint.path, verb=endpoint.verb) @@ -3052,7 +3087,9 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: else: switches = [] - self.log.debug(f"Queried {len(switches)} switches from fabric {self.fabric}") + self.log.debug( + "Queried %s switches from fabric %s", len(switches), self.fabric + ) return switches # ===================================================================== diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py index 0f41f35c..36c02aef 100644 --- a/plugins/module_utils/manage_switches/utils.py +++ b/plugins/module_utils/manage_switches/utils.py @@ -16,7 +16,7 @@ import logging import time from copy import deepcopy -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_bootstrap import ( EpManageFabricsBootstrapGet, @@ -200,11 +200,13 @@ def group_switches_by_credentials( groups.setdefault(group_key, []).append(switch) log.info( - f"Grouped {len(switches)} switches into " f"{len(groups)} credential group(s)" + "Grouped %s switches into %s credential group(s)", + len(switches), + len(groups), ) for idx, (key, group_switches) in enumerate(groups.items(), 1): - username, _, auth_proto, 
platform_type, preserve_config = key + username, _pw_hash, auth_proto, platform_type, preserve_config = key auth_value = ( auth_proto.value if hasattr(auth_proto, "value") else str(auth_proto) ) @@ -247,7 +249,7 @@ def query_bootstrap_switches( endpoint = EpManageFabricsBootstrapGet() endpoint.fabric_name = fabric - log.debug(f"Bootstrap endpoint: {endpoint.path}") + log.debug("Bootstrap endpoint: %s", endpoint.path) try: result = nd.request( @@ -266,7 +268,7 @@ def query_bootstrap_switches( else: switches = [] - log.info(f"Bootstrap API returned {len(switches)} " f"switch(es) in POAP loop") + log.info("Bootstrap API returned %s switch(es) in POAP loop", len(switches)) log.debug("EXIT: query_bootstrap_switches()") return switches @@ -425,7 +427,7 @@ def wait_for_switch_manageable( Returns: ``True`` if all switches are manageable, ``False`` on timeout. """ - self.log.info(f"Waiting for switches to become manageable: {serial_numbers}") + self.log.info("Waiting for switches to become manageable: %s", serial_numbers) # Phase 1 + 2: migration → normal if not self._wait_for_system_mode(serial_numbers): @@ -441,9 +443,7 @@ def wait_for_switch_manageable( # Phase 4: greenfield shortcut (skipped for POAP bootstrap) if not skip_greenfield_check and self._is_greenfield_debug_enabled(): - self.log.info( - "Greenfield debug flag enabled — " "skipping reload detection" - ) + self.log.info("Greenfield debug flag enabled — skipping reload detection") return True if skip_greenfield_check: @@ -511,25 +511,28 @@ def wait_for_discovery( attempts = max_attempts or 30 interval = wait_interval or self.wait_interval - self.log.info(f"Waiting for discovery of: {seed_ip}") + self.log.info("Waiting for discovery of: %s", seed_ip) for attempt in range(attempts): status = self._get_discovery_status(seed_ip) if status and status.get("status") in self.MANAGEABLE_STATUSES: - self.log.info(f"Discovery completed for {seed_ip}") + self.log.info("Discovery completed for %s", seed_ip) return status if 
status and status.get("status") in self.FAILED_STATUSES: - self.log.error(f"Discovery failed for {seed_ip}: {status}") + self.log.error("Discovery failed for %s: %s", seed_ip, status) return None self.log.debug( - f"Discovery attempt {attempt + 1}/{attempts} " f"for {seed_ip}" + "Discovery attempt %s/%s for %s", + attempt + 1, + attempts, + seed_ip, ) time.sleep(interval) - self.log.warning(f"Discovery timeout for {seed_ip}") + self.log.warning("Discovery timeout for %s", seed_ip) return None # ===================================================================== @@ -564,9 +567,7 @@ def _wait_for_system_mode(self, serial_numbers: List[str]) -> bool: if pending is None: return False - self.log.info( - "All switches in normal system mode — " "proceeding to discovery checks" - ) + self.log.info("All switches in normal system mode — proceeding to discovery checks") return True def _poll_system_mode( @@ -603,18 +604,21 @@ def _poll_system_mode( ) if not remaining: - self.log.info(f"All switches {label} mode (attempt {attempt})") + self.log.info("All switches %s mode (attempt %s)", label, attempt) return remaining pending = remaining self.log.debug( - f"Attempt {attempt}/{self.max_attempts}: " - f"{len(pending)} switch(es) waiting to " - f"{label}: {pending}" + "Attempt %s/%s: %s switch(es) waiting to %s: %s", + attempt, + self.max_attempts, + len(pending), + label, + pending, ) time.sleep(self.wait_interval * self._MIGRATION_SLEEP_FACTOR) - self.log.warning(f"Timeout waiting for switches to {label}: {pending}") + self.log.warning("Timeout waiting for switches to %s: %s", label, pending) return None # ===================================================================== @@ -732,14 +736,17 @@ def _wait_for_discovery_state( self._trigger_rediscovery(pending) self.log.debug( - f"Attempt {attempt}/{self.max_attempts}: " - f"{len(pending)} switch(es) not yet " - f"'{target_state}': {pending}" + "Attempt %s/%s: %s switch(es) not yet '%s': %s", + attempt, + self.max_attempts, + 
len(pending), + target_state, + pending, ) time.sleep(self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR) self.log.warning( - f"Timeout waiting for '{target_state}' state: " f"{serial_numbers}" + "Timeout waiting for '%s' state: %s", target_state, serial_numbers ) return False @@ -791,12 +798,15 @@ def _wait_for_switches_in_fabric( return True self.log.debug( - f"Attempt {attempt}/{self.max_attempts}: " - f"{len(pending)} switch(es) not yet in fabric: {pending}" + "Attempt %s/%s: %s switch(es) not yet in fabric: %s", + attempt, + self.max_attempts, + len(pending), + pending, ) time.sleep(self.wait_interval) - self.log.warning(f"Timeout waiting for switches to appear in fabric: {pending}") + self.log.warning("Timeout waiting for switches to appear in fabric: %s", pending) return False def _fetch_switch_data( @@ -818,7 +828,7 @@ def _fetch_switch_data( return None return switch_data except Exception as e: - self.log.error(f"Failed to fetch switch data: {e}") + self.log.error("Failed to fetch switch data: %s", e) return None def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: @@ -831,7 +841,7 @@ def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: return payload = {"switchIds": serial_numbers} - self.log.info(f"Triggering rediscovery for: {serial_numbers}") + self.log.info("Triggering rediscovery for: %s", serial_numbers) try: self.nd.request( self.ep_rediscover.path, @@ -839,7 +849,7 @@ def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: data=payload, ) except Exception as e: - self.log.warning(f"Failed to trigger rediscovery: {e}") + self.log.warning("Failed to trigger rediscovery: %s", e) def _get_discovery_status( self, @@ -863,7 +873,7 @@ def _get_discovery_status( return switch return None except Exception as e: - self.log.debug(f"Discovery status check failed: {e}") + self.log.debug("Discovery status check failed: %s", e) return None def _is_greenfield_debug_enabled(self) -> bool: @@ -886,10 +896,10 @@ def 
_is_greenfield_debug_enabled(self) -> bool: flag = ( fabric_info.get("management", {}).get("greenfieldDebugFlag", "").lower() ) - self.log.debug(f"Greenfield debug flag value: '{flag}'") + self.log.debug("Greenfield debug flag value: '%s'", flag) self._greenfield_debug_enabled = flag == "enable" except Exception as e: - self.log.debug(f"Failed to get greenfield debug flag: {e}") + self.log.debug("Failed to get greenfield debug flag: %s", e) self._greenfield_debug_enabled = False return self._greenfield_debug_enabled diff --git a/plugins/module_utils/models/__init__.py b/plugins/module_utils/models/__init__.py index 40a96afc..e69de29b 100644 --- a/plugins/module_utils/models/__init__.py +++ b/plugins/module_utils/models/__init__.py @@ -1 +0,0 @@ -# -*- coding: utf-8 -*- diff --git a/plugins/module_utils/models/manage_switches/__init__.py b/plugins/module_utils/models/manage_switches/__init__.py index c093d9e4..e69de29b 100644 --- a/plugins/module_utils/models/manage_switches/__init__.py +++ b/plugins/module_utils/models/manage_switches/__init__.py @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""nd_manage_switches models package. - -Re-exports all model classes, enums, and validators from their individual -modules so that consumers can import directly from the package: - - from .models.nd_manage_switches import SwitchConfigModel, SwitchRole, ... 
-""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -# --- Enums --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( # noqa: F401 - AdvisoryLevel, - AnomalyLevel, - ConfigSyncStatus, - DiscoveryStatus, - PlatformType, - RemoteCredentialStore, - SnmpV3AuthProtocol, - SwitchRole, - SystemMode, - VpcRole, -) - -# --- Validators --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( - SwitchValidators, -) # noqa: F401 - -# --- Nested / shared models --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( # noqa: F401 - AdditionalAciSwitchData, - AdditionalSwitchData, - Metadata, - SwitchMetadata, - TelemetryIpCollection, - VpcData, -) - -# --- Discovery models --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.discovery_models import ( # noqa: F401 - AddSwitchesRequestModel, - ShallowDiscoveryRequestModel, - SwitchDiscoveryModel, -) - -# --- Switch data models --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( # noqa: F401 - SwitchDataModel, -) - -# --- Bootstrap models --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.bootstrap_models import ( # noqa: F401 - BootstrapBaseData, - BootstrapBaseModel, - BootstrapCredentialModel, - BootstrapImportSpecificModel, - BootstrapImportSwitchModel, - ImportBootstrapSwitchesRequestModel, -) - -# --- Preprovision models --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.preprovision_models import ( # noqa: F401 - PreProvisionSwitchesRequestModel, - PreProvisionSwitchModel, -) - -# --- RMA models --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.rma_models import ( # noqa: F401 - RMASwitchModel, -) - -# --- Switch actions models --- -from 
ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_actions_models import ( # noqa: F401 - ChangeSwitchSerialNumberRequestModel, - SwitchCredentialsRequestModel, -) - -# --- Config / playbook models --- -from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( # noqa: F401 - ConfigDataModel, - POAPConfigModel, - PreprovisionConfigModel, - RMAConfigModel, - SwitchConfigModel, -) - -__all__ = [ - # Enums - "AdvisoryLevel", - "AnomalyLevel", - "ConfigSyncStatus", - "DiscoveryStatus", - "PlatformType", - "RemoteCredentialStore", - "SnmpV3AuthProtocol", - "SwitchRole", - "SystemMode", - "VpcRole", - # Validators - "SwitchValidators", - # Nested models - "AdditionalAciSwitchData", - "AdditionalSwitchData", - "Metadata", - "SwitchMetadata", - "TelemetryIpCollection", - "VpcData", - # Discovery models - "AddSwitchesRequestModel", - "ShallowDiscoveryRequestModel", - "SwitchDiscoveryModel", - # Switch data models - "SwitchDataModel", - # Bootstrap models - "BootstrapBaseData", - "BootstrapBaseModel", - "BootstrapCredentialModel", - "BootstrapImportSpecificModel", - "BootstrapImportSwitchModel", - "ImportBootstrapSwitchesRequestModel", - # Preprovision models - "PreProvisionSwitchesRequestModel", - "PreProvisionSwitchModel", - # RMA models - "RMASwitchModel", - # Switch actions models - "ChangeSwitchSerialNumberRequestModel", - "SwitchCredentialsRequestModel", - # Config models - "ConfigDataModel", - "POAPConfigModel", - "PreprovisionConfigModel", - "RMAConfigModel", - "SwitchConfigModel", -] diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index d306dd17..e65f0d2f 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -14,7 +14,6 @@ __metaclass__ = type from typing import Any, Dict, List, Optional, ClassVar, Literal 
-from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, @@ -139,7 +138,7 @@ class BootstrapCredentialModel(NDBaseModel): ) @model_validator(mode="after") - def validate_credentials(self) -> Self: + def validate_credentials(self) -> "BootstrapCredentialModel": """Validate credential configuration logic.""" if self.use_new_credentials: if self.remote_credential_store == RemoteCredentialStore.CYBERARK: @@ -318,7 +317,7 @@ def to_payload(self) -> Dict[str, Any]: return self.model_dump(by_alias=True, exclude_none=True) @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: + def from_response(cls, response: Dict[str, Any]) -> "BootstrapImportSwitchModel": """Create model instance from API response.""" return cls.model_validate(response) diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index e4fe3029..5ae2c393 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -25,7 +25,6 @@ model_validator, ) from typing import Any, Dict, List, Optional, ClassVar, Literal, Union -from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( @@ -132,7 +131,7 @@ class POAPConfigModel(NDNestedModel): ) @model_validator(mode="after") - def validate_discovery_credentials_pair(self) -> Self: + def validate_discovery_credentials_pair(self) -> "POAPConfigModel": """Validate that discovery_username and discovery_password are both set or both absent.""" has_user = bool(self.discovery_username) has_pass = bool(self.discovery_password) @@ -210,7 +209,7 @@ class PreprovisionConfigModel(NDNestedModel): ) @model_validator(mode="after") - def validate_discovery_credentials_pair(self) -> Self: + def 
validate_discovery_credentials_pair(self) -> "PreprovisionConfigModel": """Validate that discovery_username and discovery_password are both set or both absent.""" has_user = bool(self.discovery_username) has_pass = bool(self.discovery_password) @@ -308,7 +307,7 @@ def validate_serial_numbers(cls, v: str) -> str: return result @model_validator(mode="after") - def validate_discovery_credentials_pair(self) -> Self: + def validate_discovery_credentials_pair(self) -> "RMAConfigModel": """Validate that discovery_username and discovery_password are both set or both absent. Mirrors the dcnm_inventory.py bidirectional check: @@ -454,7 +453,7 @@ def to_config_dict(self) -> Dict[str, Any]: ) @model_validator(mode="after") - def reject_auth_proto_for_special_ops(self) -> Self: + def reject_auth_proto_for_special_ops(self) -> "SwitchConfigModel": """Reject non-MD5 auth_proto when POAP, Pre-provision, Swap or RMA is configured. These operations always use MD5 internally. By validating mode='after', @@ -476,7 +475,7 @@ def reject_auth_proto_for_special_ops(self) -> Self: return self @model_validator(mode="after") - def validate_special_ops_exclusion(self) -> Self: + def validate_special_ops_exclusion(self) -> "SwitchConfigModel": """Validate mutually exclusive operation combinations. 
Allowed: @@ -495,7 +494,7 @@ def validate_special_ops_exclusion(self) -> Self: return self @model_validator(mode="after") - def validate_special_ops_credentials(self) -> Self: + def validate_special_ops_credentials(self) -> "SwitchConfigModel": """Validate credentials for POAP, Pre-provision, Swap and RMA operations.""" if self.poap or self.preprovision or self.rma: if not self.username or not self.password: @@ -509,7 +508,7 @@ def validate_special_ops_credentials(self) -> Self: return self @model_validator(mode="after") - def apply_state_defaults(self, info: ValidationInfo) -> Self: + def apply_state_defaults(self, info: ValidationInfo) -> "SwitchConfigModel": """Apply state-aware defaults and enforcement using validation context. When ``context={"state": "merged"}`` (or ``"overridden"``) is passed diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py index fc42c8c7..2092bd5d 100644 --- a/plugins/module_utils/models/manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -14,7 +14,6 @@ __metaclass__ = type from typing import Any, Dict, List, Optional, ClassVar, Literal, Union -from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py index 96e38588..64986376 100644 --- a/plugins/module_utils/models/manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -15,7 +15,6 @@ from ipaddress import ip_network from typing import Any, Dict, List, Optional, ClassVar, Literal -from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, @@ -191,7 +190,7 @@ def to_payload(self) -> Dict[str, Any]: 
return self.model_dump(by_alias=True, exclude_none=True) @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: + def from_response(cls, response: Dict[str, Any]) -> "PreProvisionSwitchModel": """Create model instance from API response.""" return cls.model_validate(response) diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py index d1f253e2..c699ace0 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -14,7 +14,6 @@ __metaclass__ = type from typing import Any, Dict, List, Optional, ClassVar, Literal -from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, @@ -135,7 +134,7 @@ def use_new_credentials(self) -> bool: return bool(self.discovery_username and self.discovery_password) @model_validator(mode="after") - def validate_rma_credentials(self) -> Self: + def validate_rma_credentials(self) -> "RMASwitchModel": """Validate RMA credential configuration logic.""" if self.use_new_credentials: if self.remote_credential_store == RemoteCredentialStore.CYBERARK: @@ -157,7 +156,7 @@ def to_payload(self) -> Dict[str, Any]: return self.model_dump(by_alias=True, exclude_none=True) @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: + def from_response(cls, response: Dict[str, Any]) -> "RMASwitchModel": """Create model instance from API response.""" return cls.model_validate(response) diff --git a/plugins/module_utils/models/manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py index e0384dd6..07f38e4b 100644 --- a/plugins/module_utils/models/manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -13,8 +13,7 @@ __metaclass__ = type -from typing import Any, Dict, List, Literal, Optional, 
ClassVar -from typing_extensions import Self +from typing import List, Literal, Optional, ClassVar from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, @@ -80,7 +79,7 @@ def validate_switch_ids(cls, v: List[str]) -> List[str]: return validated @model_validator(mode="after") - def validate_credentials(self) -> Self: + def validate_credentials(self) -> "SwitchCredentialsRequestModel": """Ensure either local or remote credentials are provided.""" has_local = ( self.switch_username is not None and self.switch_password is not None diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index 230c9215..759f131c 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -14,7 +14,6 @@ __metaclass__ = type from typing import Any, Dict, List, Optional, ClassVar, Literal, Union -from typing_extensions import Self from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( Field, @@ -354,7 +353,7 @@ def to_payload(self) -> Dict[str, Any]: return self.model_dump(by_alias=True, exclude_none=True) @classmethod - def from_response(cls, response: Dict[str, Any]) -> Self: + def from_response(cls, response: Dict[str, Any]) -> "SwitchDataModel": """ Create model instance from API response. 
diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 44a55195..76685d43 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -166,20 +166,27 @@ def save_config( self.ep_config_save, action="Config save" ) self.log.info( - f"Config save succeeded on attempt " - f"{attempt}/{max_retries} for fabric {self.fabric}" + "Config save succeeded on attempt %s/%s for fabric %s", + attempt, + max_retries, + self.fabric, ) return response except SwitchOperationError as exc: last_error = exc self.log.warning( - f"Config save attempt {attempt}/{max_retries} failed " - f"for fabric {self.fabric}: {exc}" + "Config save attempt %s/%s failed for fabric %s: %s", + attempt, + max_retries, + self.fabric, + exc, ) if attempt < max_retries: self.log.info( - f"Retrying config save in {retry_delay}s " - f"(attempt {attempt + 1}/{max_retries})" + "Retrying config save in %ss (attempt %s/%s)", + retry_delay, + attempt + 1, + max_retries, ) time.sleep(retry_delay) raise SwitchOperationError( @@ -235,17 +242,13 @@ def _request_endpoint( Raises: SwitchOperationError: On any request failure. 
""" - self.log.info(f"{action} for fabric: {self.fabric}") + self.log.info("%s for fabric: %s", action, self.fabric) try: response = self.nd.request(endpoint.path, verb=endpoint.verb) - self.log.info( - f"{action} completed for fabric: {self.fabric}" - ) + self.log.info("%s completed for fabric: %s", action, self.fabric) return response except Exception as e: - self.log.error( - f"{action} failed for fabric {self.fabric}: {e}" - ) + self.log.error("%s failed for fabric %s: %s", action, self.fabric, e) raise SwitchOperationError( f"{action} failed for fabric {self.fabric}: {e}" ) from e diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 6778fd85..237f8901 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -79,7 +79,6 @@ description: - Login password for the switch. type: str - required: true role: description: - Role to assign to the switch in the fabric. @@ -105,6 +104,14 @@ - Set to C(false) for greenfield deployment, C(true) for brownfield. type: bool default: false + platform_type: + description: + - Platform type of the switch. + type: str + default: nx-os + choices: + - nx-os + - ios-xe poap: description: - Bootstrap POAP config for the switch. @@ -137,7 +144,6 @@ description: - Password for device discovery during POAP. type: str - no_log: true image_policy: description: - Name of the image policy to be applied on the switch. @@ -166,7 +172,6 @@ description: - Password for device discovery during pre-provision. type: str - no_log: true model: description: - Model of the switch to Pre-provision (e.g., N9K-C93180YC-EX). @@ -232,7 +237,6 @@ description: - Password for device discovery during POAP and RMA discovery. type: str - no_log: true model: description: - Model of switch to Bootstrap for RMA. @@ -248,7 +252,7 @@ config_data: description: - Basic config data of switch to Bootstrap for RMA. - - C(models) and C(gateway) are mandatory. 
+ - C(models) and C(gateway) are optional. - C(models) is list of model of modules in switch to Bootstrap for RMA. - C(gateway) is the gateway IP with mask for the switch to Bootstrap for RMA. type: dict @@ -258,12 +262,10 @@ - List of module models in the switch. type: list elements: str - required: true gateway: description: - Gateway IP with subnet mask (e.g., 192.168.0.1/24). type: str - required: true extends_documentation_fragment: - cisco.nd.modules @@ -490,12 +492,12 @@ def main(): sw_module.manage_state() # Exit with results - log.info(f"State management completed successfully. Changed: {results.changed}") + log.info("State management completed successfully. Changed: %s", results.changed) sw_module.exit_json() except NDModuleError as error: # NDModule-specific errors (API failures, authentication issues, etc.) - log.error(f"NDModule error: {error.msg}") + log.error("NDModule error: %s", error.msg) # Try to get response from RestSend if available try: @@ -521,13 +523,13 @@ def main(): if output_level == "debug": results.final_result["error_details"] = error.to_dict() - log.error(f"Module failed: {results.final_result}") + log.error("Module failed: %s", results.final_result) module.fail_json(msg=error.msg, **results.final_result) except Exception as error: # Unexpected errors - log.error(f"Unexpected error during module execution: {str(error)}") - log.error(f"Error type: {type(error).__name__}") + log.error("Unexpected error during module execution: %s", str(error)) + log.error("Error type: %s", error.__class__.__name__) # Build failed result results.response_current = { diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml index 04a2e4f2..584a4496 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml @@ -80,7 +80,6 @@ state: deleted config: - seed_ip: "{{ 
test_data.sw1 }}" - register: delete_result register: result tags: deleted diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt new file mode 100644 index 00000000..c3ca4236 --- /dev/null +++ b/tests/sanity/ignore-2.16.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py index b0ed3f95..60267297 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py @@ -156,7 +156,7 @@ def test_endpoints_api_v1_manage_fabrics_00110(): """ instance = EpManageFabricConfigDeployPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_00120(): @@ -247,7 +247,7 @@ def test_endpoints_api_v1_manage_fabrics_00210(): """ instance = EpManageFabricGet() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_00220(): diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py index 263b9f0c..fe913d48 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py @@ -69,7 +69,7 @@ def test_endpoints_api_v1_manage_fabrics_actions_00110(): """ instance = EpManageFabricsActionsShallowDiscoveryPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_actions_00120(): @@ -138,7 +138,7 @@ def test_endpoints_api_v1_manage_fabrics_actions_00210(): """ instance = EpManageFabricsActionsConfigSavePost() with pytest.raises(ValueError): - _ = instance.path + instance.path def 
test_endpoints_api_v1_manage_fabrics_actions_00220(): diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py index bf5f6c68..89349b15 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py @@ -157,7 +157,7 @@ def test_endpoints_api_v1_manage_fabrics_bootstrap_00110(): """ instance = EpManageFabricsBootstrapGet() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_bootstrap_00120(): diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py index d53488ea..79de46b7 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py @@ -68,7 +68,7 @@ def test_endpoints_api_v1_manage_fabrics_inventory_00020(): """ instance = EpManageFabricsInventoryDiscoverGet() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_inventory_00030(): diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py index 0ce1af96..72802bfc 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py @@ -72,7 +72,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00110(): """ instance = EpManageFabricsSwitchActionsRemovePost() with pytest.raises(ValueError): - _ = instance.path + instance.path def 
test_endpoints_api_v1_manage_fabrics_switchactions_00120(): @@ -166,7 +166,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00210(): """ instance = EpManageFabricsSwitchActionsChangeRolesPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switchactions_00220(): @@ -257,7 +257,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00310(): """ instance = EpManageFabricsSwitchActionsImportBootstrapPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switchactions_00320(): @@ -351,7 +351,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00410(): """ instance = EpManageFabricsSwitchActionsPreProvisionPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switchactions_00420(): @@ -445,7 +445,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00710(): """ instance = EpManageFabricsSwitchActionsRediscoverPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switchactions_00720(): diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py index a5d7217f..6ee60ef1 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py @@ -184,7 +184,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00110(): """ instance = EpManageFabricsSwitchesGet() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switches_00120(): @@ -275,7 +275,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00210(): """ instance = EpManageFabricsSwitchesPost() with pytest.raises(ValueError): - _ = instance.path + 
instance.path def test_endpoints_api_v1_manage_fabrics_switches_00220(): @@ -430,7 +430,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00510(): """ instance = EpManageFabricsSwitchProvisionRMAPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switches_00520(): @@ -450,7 +450,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00520(): instance = EpManageFabricsSwitchProvisionRMAPost() instance.fabric_name = "MyFabric" with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switches_00530(): @@ -544,7 +544,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00610(): """ instance = EpManageFabricsSwitchChangeSerialNumberPost() with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switches_00620(): @@ -564,7 +564,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00620(): instance = EpManageFabricsSwitchChangeSerialNumberPost() instance.fabric_name = "MyFabric" with pytest.raises(ValueError): - _ = instance.path + instance.path def test_endpoints_api_v1_manage_fabrics_switches_00630(): @@ -611,4 +611,3 @@ def test_endpoints_api_v1_manage_fabrics_switches_00640(): instance.endpoint_params.cluster_name = "cluster1" result = instance.path assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1" - From 261bdbed0b49d74a2c5f42c0a5a8e70e42d6ab8a Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 30 Mar 2026 13:34:57 +0530 Subject: [PATCH 078/109] Black Formatting R2 --- plugins/action/nd_switches_validate.py | 51 ++++++++++++++----- .../manage_switches/nd_switch_resources.py | 8 +-- plugins/module_utils/manage_switches/utils.py | 8 ++- plugins/modules/nd_manage_switches.py | 4 +- .../test_endpoints_api_v1_manage_fabrics.py | 9 +++- ...nts_api_v1_manage_fabrics_switchactions.py | 18 +++++-- 
...ndpoints_api_v1_manage_fabrics_switches.py | 24 +++++++-- 7 files changed, 89 insertions(+), 33 deletions(-) diff --git a/plugins/action/nd_switches_validate.py b/plugins/action/nd_switches_validate.py index becbe870..60a54bb9 100644 --- a/plugins/action/nd_switches_validate.py +++ b/plugins/action/nd_switches_validate.py @@ -35,8 +35,13 @@ ) try: - from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import SwitchConfigModel - from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import SwitchDataModel + from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( + SwitchConfigModel, + ) + from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( + SwitchDataModel, + ) + HAS_MODELS = True except ImportError: HAS_MODELS = False @@ -48,6 +53,7 @@ # Validation orchestration model # --------------------------------------------------------------------------- + class SwitchesValidate(BaseModel): """Orchestrates the match between playbook config entries and live ND inventory.""" @@ -68,7 +74,11 @@ def parse_config_data(cls, value): if isinstance(value, list): try: return [ - SwitchConfigModel.model_validate(item) if isinstance(item, dict) else item + ( + SwitchConfigModel.model_validate(item) + if isinstance(item, dict) + else item + ) for item in value ] except (ValidationError, ValueError) as e: @@ -84,7 +94,11 @@ def parse_nd_data(cls, value): if isinstance(value, list): try: return [ - SwitchDataModel.from_response(item) if isinstance(item, dict) else item + ( + SwitchDataModel.from_response(item) + if isinstance(item, dict) + else item + ) for item in value ] except (ValidationError, ValueError) as e: @@ -133,13 +147,15 @@ def validate_lists_equality(self): switch_role = nd_item.switch_role # SwitchRole enum or None seed_ip_match = ( - (seed_ip is not None and ip_address is not None and 
ip_address == seed_ip) - or bool(ignore_fields["seed_ip"]) - ) + seed_ip is not None + and ip_address is not None + and ip_address == seed_ip + ) or bool(ignore_fields["seed_ip"]) role_match = ( - (role_expected is not None and switch_role is not None and switch_role == role_expected) - or bool(ignore_fields["role"]) - ) + role_expected is not None + and switch_role is not None + and switch_role == role_expected + ) or bool(ignore_fields["role"]) if seed_ip_match and role_match: matched_indices.add(i) @@ -155,7 +171,9 @@ def validate_lists_equality(self): role_mismatches.setdefault( seed_ip or ip_address, { - "expected_role": role_expected.value if role_expected else None, + "expected_role": ( + role_expected.value if role_expected else None + ), "response_role": switch_role.value if switch_role else None, }, ) @@ -174,7 +192,11 @@ def validate_lists_equality(self): if missing_ips: display.display(" Missing IPs: {0}".format(missing_ips)) if role_mismatches: - display.display(" Role mismatches: {0}".format(json.dumps(role_mismatches, indent=2))) + display.display( + " Role mismatches: {0}".format( + json.dumps(role_mismatches, indent=2) + ) + ) self.response = False return self @@ -184,6 +206,7 @@ def validate_lists_equality(self): # Action plugin # --------------------------------------------------------------------------- + class ActionModule(ActionBase): """Ansible action plugin for validating ND switch inventory data. 
@@ -203,7 +226,9 @@ def run(self, tmp=None, task_vars=None): if not HAS_PYDANTIC or not HAS_MODELS: results["failed"] = True - results["msg"] = "pydantic and the ND collection models are required for nd_switches_validate" + results["msg"] = ( + "pydantic and the ND collection models are required for nd_switches_validate" + ) return results nd_data = self._task.args["nd_data"] diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index e1523276..a90638c5 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -3023,9 +3023,7 @@ def _handle_deleted_state( else: self.log.info("Switch not found for deletion: %s", identifier) - self.log.info( - "Total switches marked for deletion: %s", len(switches_to_delete) - ) + self.log.info("Total switches marked for deletion: %s", len(switches_to_delete)) if not switches_to_delete: self.log.info("No switches to delete") return @@ -3087,9 +3085,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: else: switches = [] - self.log.debug( - "Queried %s switches from fabric %s", len(switches), self.fabric - ) + self.log.debug("Queried %s switches from fabric %s", len(switches), self.fabric) return switches # ===================================================================== diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py index 36c02aef..23062f91 100644 --- a/plugins/module_utils/manage_switches/utils.py +++ b/plugins/module_utils/manage_switches/utils.py @@ -567,7 +567,9 @@ def _wait_for_system_mode(self, serial_numbers: List[str]) -> bool: if pending is None: return False - self.log.info("All switches in normal system mode — proceeding to discovery checks") + self.log.info( + "All switches in normal system mode — proceeding to discovery checks" + ) return True def _poll_system_mode( @@ -806,7 +808,9 @@ 
def _wait_for_switches_in_fabric( ) time.sleep(self.wait_interval) - self.log.warning("Timeout waiting for switches to appear in fabric: %s", pending) + self.log.warning( + "Timeout waiting for switches to appear in fabric: %s", pending + ) return False def _fetch_switch_data( diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 237f8901..593162f9 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -492,7 +492,9 @@ def main(): sw_module.manage_state() # Exit with results - log.info("State management completed successfully. Changed: %s", results.changed) + log.info( + "State management completed successfully. Changed: %s", results.changed + ) sw_module.exit_json() except NDModuleError as error: diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py index 60267297..efd7c931 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py @@ -85,7 +85,9 @@ def test_endpoints_api_v1_manage_fabrics_00030(): - FabricConfigDeployEndpointParams.to_query_string() """ with does_not_raise(): - params = FabricConfigDeployEndpointParams(force_show_run=True, incl_all_msd_switches=True) + params = FabricConfigDeployEndpointParams( + force_show_run=True, incl_all_msd_switches=True + ) result = params.to_query_string() assert "forceShowRun=true" in result assert "inclAllMsdSwitches=true" in result @@ -199,7 +201,10 @@ def test_endpoints_api_v1_manage_fabrics_00130(): instance.fabric_name = "MyFabric" instance.endpoint_params.force_show_run = True result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" + assert ( + result + == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" + ) # 
============================================================================= diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py index 72802bfc..49fa0b49 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py @@ -209,7 +209,10 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00230(): instance.fabric_name = "MyFabric" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345" + assert ( + result + == "/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345" + ) # ============================================================================= @@ -301,7 +304,9 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00330(): instance.endpoint_params.cluster_name = "cluster1" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?") + assert result.startswith( + "/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?" + ) assert "clusterName=cluster1" in result assert "ticketId=CHG12345" in result @@ -395,7 +400,9 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00430(): instance.endpoint_params.cluster_name = "cluster1" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/preProvision?") + assert result.startswith( + "/api/v1/manage/fabrics/MyFabric/switchActions/preProvision?" 
+ ) assert "clusterName=cluster1" in result assert "ticketId=CHG12345" in result @@ -488,4 +495,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00730(): instance.fabric_name = "MyFabric" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345" + assert ( + result + == "/api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345" + ) diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py index 6ee60ef1..64e3dbd9 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py @@ -133,7 +133,9 @@ def test_endpoints_api_v1_manage_fabrics_switches_00050(): - FabricSwitchesAddEndpointParams.to_query_string() """ with does_not_raise(): - params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + params = FabricSwitchesAddEndpointParams( + cluster_name="cluster1", ticket_id="CHG12345" + ) result = params.to_query_string() assert "clusterName=cluster1" in result assert "ticketId=CHG12345" in result @@ -473,7 +475,10 @@ def test_endpoints_api_v1_manage_fabrics_switches_00530(): instance.fabric_name = "MyFabric" instance.switch_sn = "SAL1948TRTT" result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA" + assert ( + result + == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA" + ) def test_endpoints_api_v1_manage_fabrics_switches_00540(): @@ -496,7 +501,10 @@ def test_endpoints_api_v1_manage_fabrics_switches_00540(): instance.switch_sn = "SAL1948TRTT" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert result == 
"/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345" + assert ( + result + == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345" + ) # ============================================================================= @@ -587,7 +595,10 @@ def test_endpoints_api_v1_manage_fabrics_switches_00630(): instance.fabric_name = "MyFabric" instance.switch_sn = "SAL1948TRTT" result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber" + assert ( + result + == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber" + ) def test_endpoints_api_v1_manage_fabrics_switches_00640(): @@ -610,4 +621,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00640(): instance.switch_sn = "SAL1948TRTT" instance.endpoint_params.cluster_name = "cluster1" result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1" + assert ( + result + == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1" + ) From 48d22480867bce23da402fdfb258501b33623358 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 30 Mar 2026 14:43:35 +0530 Subject: [PATCH 079/109] Handle Logging Inconsistencies --- .../manage_switches/nd_switch_resources.py | 95 ++++++++----------- plugins/module_utils/manage_switches/utils.py | 72 +++++--------- plugins/modules/nd_manage_switches.py | 12 +-- 3 files changed, 68 insertions(+), 111 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index a90638c5..ce9af018 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -326,17 +326,14 @@ def compute_changes( if prop_dict.get(k) != 
existing_dict.get(k) } log.info( - "Switch %s has differences — marking to_update. Changed fields: %s", - ip, - diff_keys, + f"Switch {ip} has differences — marking to_update. " + f"Changed fields: {diff_keys}" ) proposed_diff = {k: prop_dict.get(k) for k in diff_keys} existing_diff = {k: existing_dict.get(k) for k in diff_keys} log.debug( - "Switch %s diff detail — proposed: %s, existing: %s", - ip, - proposed_diff, - existing_diff, + f"Switch {ip} diff detail — proposed: {proposed_diff}, " + f"existing: {existing_diff}" ) changes["to_update"].append(prop_sw) @@ -2416,7 +2413,7 @@ def __init__( self.poap_handler = POAPHandler(self.ctx, self.fabric_ops, self.wait_utils) self.rma_handler = RMAHandler(self.ctx, self.fabric_ops, self.wait_utils) - log.info("Initialized NDSwitchResourceModule for fabric: %s", self.fabric) + log.info(f"Initialized NDSwitchResourceModule for fabric: {self.fabric}") def exit_json(self) -> None: """Finalize collected results and exit the Ansible module. @@ -2474,7 +2471,7 @@ def manage_state(self) -> None: Returns: None. 
""" - self.log.info("Managing state: %s", self.state) + self.log.info(f"Managing state: {self.state}") # gathered — read-only, no config accepted if self.state == "gathered": @@ -2521,10 +2518,8 @@ def manage_state(self) -> None: self.output.assign(proposed=output_proposed) self.log.info( - "Config partition: %s normal, %s poap, %s rma", - len(normal_configs), - len(poap_configs), - len(rma_configs), + f"Config partition: {len(normal_configs)} normal, " + f"{len(poap_configs)} poap, {len(rma_configs)} rma" ) # POAP and RMA are only valid with state=merged @@ -2542,10 +2537,9 @@ def manage_state(self) -> None: ] if configs_to_discover: self.log.info( - "Discovery needed for %s/%s switch(es) — %s already in fabric", - len(configs_to_discover), - len(normal_configs), - len(normal_configs) - len(configs_to_discover), + f"Discovery needed for {len(configs_to_discover)}/" + f"{len(normal_configs)} switch(es) — " + f"{len(normal_configs) - len(configs_to_discover)} already in fabric" ) discovered_data = self.discovery.discover(configs_to_discover) else: @@ -2598,8 +2592,8 @@ def _handle_merged_state( """ self.log.debug("ENTER: _handle_merged_state()") self.log.info("Handling merged state") - self.log.debug("Proposed configs: %s", len(self.proposed)) - self.log.debug("Existing switches: %s", len(self.existing)) + self.log.debug(f"Proposed configs: {len(self.proposed)}") + self.log.debug(f"Existing switches: {len(self.existing)}") if not self.proposed: self.log.info("No configurations provided for merged state") @@ -2625,10 +2619,9 @@ def _handle_merged_state( # Check mode — preview only if self.nd.module.check_mode: self.log.info( - "Check mode: would add %s, process %s migration switch(es), save_deploy_required=%s", - len(switches_to_add), - len(migration_switches), - idempotent_save_req, + f"Check mode: would add {len(switches_to_add)}, process " + f"{len(migration_switches)} migration switch(es), " + f"save_deploy_required={idempotent_save_req}" ) self.results.action = 
"merge" self.results.state = self.state @@ -2661,8 +2654,7 @@ def _handle_merged_state( add_configs.append(cfg) else: self.log.warning( - "No config found for switch %s, skipping add", - sw.fabric_management_ip, + f"No config found for switch {sw.fabric_management_ip}, skipping add" ) if add_configs: @@ -2684,8 +2676,7 @@ def _handle_merged_state( pairs.append((cfg, disc)) else: self.log.warning( - "No discovery data for %s, skipping", - cfg.seed_ip, + f"No discovery data for {cfg.seed_ip}, skipping" ) if not pairs: @@ -2853,11 +2844,8 @@ def _handle_overridden_state( n_add = len(diff.get("to_add", [])) n_migrate = len(diff.get("migration_mode", [])) self.log.info( - "Check mode: would delete %s, delete-and-re-add %s, add %s, migrate %s", - n_delete, - n_update, - n_add, - n_migrate, + f"Check mode: would delete {n_delete}, delete-and-re-add " + f"{n_update}, add {n_add}, migrate {n_migrate}" ) self.results.action = "override" self.results.state = self.state @@ -2881,9 +2869,8 @@ def _handle_overridden_state( # Phase 1: Switches not in proposed config for sw in diff.get("to_delete", []): self.log.info( - "Marking for deletion (not in proposed): %s (%s)", - sw.fabric_management_ip, - sw.switch_id, + f"Marking for deletion (not in proposed): " + f"{sw.fabric_management_ip} ({sw.switch_id})" ) switches_to_delete.append(sw) self._log_operation("delete", sw.fabric_management_ip) @@ -2901,9 +2888,8 @@ def _handle_overridden_state( ) if existing_sw: self.log.info( - "Marking for deletion (re-add update): %s (%s)", - existing_sw.fabric_management_ip, - existing_sw.switch_id, + f"Marking for deletion (re-add update): " + f"{existing_sw.fabric_management_ip} ({existing_sw.switch_id})" ) switches_to_delete.append(existing_sw) self._log_operation( @@ -2933,9 +2919,9 @@ def _handle_overridden_state( ] if configs_needing_rediscovery: self.log.info( - "Re-discovering %s switch(es) after deletion for re-add: %s", - len(configs_needing_rediscovery), - [cfg.seed_ip for cfg in 
configs_needing_rediscovery], + f"Re-discovering {len(configs_needing_rediscovery)} switch(es) " + f"after deletion for re-add: " + f"{[cfg.seed_ip for cfg in configs_needing_rediscovery]}" ) fresh_discovered = self.discovery.discover(configs_needing_rediscovery) discovered_data = {**(discovered_data or {}), **fresh_discovered} @@ -2955,10 +2941,10 @@ def _handle_gathered_state(self) -> None: None. """ self.log.debug("ENTER: _handle_gathered_state()") - self.log.info("Gathering inventory for fabric '%s'", self.fabric) + self.log.info(f"Gathering inventory for fabric '{self.fabric}'") if not self.existing: - self.log.info("Fabric '%s' has no switches in inventory", self.fabric) + self.log.info(f"Fabric '{self.fabric}' has no switches in inventory") self.results.action = "gathered" self.results.state = self.state @@ -2969,9 +2955,7 @@ def _handle_gathered_state(self) -> None: self.results.register_api_call() self.log.info( - "Gathered %s switch(es) from fabric '%s'", - len(list(self.existing)), - self.fabric, + f"Gathered {len(list(self.existing))} switch(es) from fabric '{self.fabric}'" ) self.log.debug("EXIT: _handle_gathered_state()") @@ -3015,15 +2999,13 @@ def _handle_deleted_state( ) if existing_switch: self.log.info( - "Marking for deletion: %s (%s)", - identifier, - existing_switch.switch_id, + f"Marking for deletion: {identifier} ({existing_switch.switch_id})" ) switches_to_delete.append(existing_switch) else: - self.log.info("Switch not found for deletion: %s", identifier) + self.log.info(f"Switch not found for deletion: {identifier}") - self.log.info("Total switches marked for deletion: %s", len(switches_to_delete)) + self.log.info(f"Total switches marked for deletion: {len(switches_to_delete)}") if not switches_to_delete: self.log.info("No switches to delete") return @@ -3031,7 +3013,7 @@ def _handle_deleted_state( # Check mode — preview only if self.nd.module.check_mode: self.log.info( - "Check mode: would delete %s switch(es)", len(switches_to_delete) + 
f"Check mode: would delete {len(switches_to_delete)} switch(es)" ) self.results.action = "delete" self.results.state = self.state @@ -3048,8 +3030,7 @@ def _handle_deleted_state( return self.log.info( - "Proceeding to delete %s switch(es) from fabric", - len(switches_to_delete), + f"Proceeding to delete {len(switches_to_delete)} switch(es) from fabric" ) self.fabric_ops.bulk_delete(switches_to_delete) for sw in switches_to_delete: @@ -3068,8 +3049,8 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: """ endpoint = EpManageFabricsSwitchesGet() endpoint.fabric_name = self.fabric - self.log.debug("Querying all switches with endpoint: %s", endpoint.path) - self.log.debug("Query verb: %s", endpoint.verb) + self.log.debug(f"Querying all switches with endpoint: {endpoint.path}") + self.log.debug(f"Query verb: {endpoint.verb}") try: result = self.nd.request(path=endpoint.path, verb=endpoint.verb) @@ -3085,7 +3066,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: else: switches = [] - self.log.debug("Queried %s switches from fabric %s", len(switches), self.fabric) + self.log.debug(f"Queried {len(switches)} switches from fabric {self.fabric}") return switches # ===================================================================== diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py index 23062f91..6447a07d 100644 --- a/plugins/module_utils/manage_switches/utils.py +++ b/plugins/module_utils/manage_switches/utils.py @@ -199,11 +199,7 @@ def group_switches_by_credentials( ) groups.setdefault(group_key, []).append(switch) - log.info( - "Grouped %s switches into %s credential group(s)", - len(switches), - len(groups), - ) + log.info(f"Grouped {len(switches)} switches into {len(groups)} credential group(s)") for idx, (key, group_switches) in enumerate(groups.items(), 1): username, _pw_hash, auth_proto, platform_type, preserve_config = key @@ -249,7 +245,7 @@ def query_bootstrap_switches( endpoint = 
EpManageFabricsBootstrapGet() endpoint.fabric_name = fabric - log.debug("Bootstrap endpoint: %s", endpoint.path) + log.debug(f"Bootstrap endpoint: {endpoint.path}") try: result = nd.request( @@ -268,7 +264,7 @@ def query_bootstrap_switches( else: switches = [] - log.info("Bootstrap API returned %s switch(es) in POAP loop", len(switches)) + log.info(f"Bootstrap API returned {len(switches)} switch(es) in POAP loop") log.debug("EXIT: query_bootstrap_switches()") return switches @@ -427,7 +423,7 @@ def wait_for_switch_manageable( Returns: ``True`` if all switches are manageable, ``False`` on timeout. """ - self.log.info("Waiting for switches to become manageable: %s", serial_numbers) + self.log.info(f"Waiting for switches to become manageable: {serial_numbers}") # Phase 1 + 2: migration → normal if not self._wait_for_system_mode(serial_numbers): @@ -511,28 +507,23 @@ def wait_for_discovery( attempts = max_attempts or 30 interval = wait_interval or self.wait_interval - self.log.info("Waiting for discovery of: %s", seed_ip) + self.log.info(f"Waiting for discovery of: {seed_ip}") for attempt in range(attempts): status = self._get_discovery_status(seed_ip) if status and status.get("status") in self.MANAGEABLE_STATUSES: - self.log.info("Discovery completed for %s", seed_ip) + self.log.info(f"Discovery completed for {seed_ip}") return status if status and status.get("status") in self.FAILED_STATUSES: - self.log.error("Discovery failed for %s: %s", seed_ip, status) + self.log.error(f"Discovery failed for {seed_ip}: {status}") return None - self.log.debug( - "Discovery attempt %s/%s for %s", - attempt + 1, - attempts, - seed_ip, - ) + self.log.debug(f"Discovery attempt {attempt + 1}/{attempts} for {seed_ip}") time.sleep(interval) - self.log.warning("Discovery timeout for %s", seed_ip) + self.log.warning(f"Discovery timeout for {seed_ip}") return None # ===================================================================== @@ -606,21 +597,17 @@ def _poll_system_mode( ) if not 
remaining: - self.log.info("All switches %s mode (attempt %s)", label, attempt) + self.log.info(f"All switches {label} mode (attempt {attempt})") return remaining pending = remaining self.log.debug( - "Attempt %s/%s: %s switch(es) waiting to %s: %s", - attempt, - self.max_attempts, - len(pending), - label, - pending, + f"Attempt {attempt}/{self.max_attempts}: {len(pending)} " + f"switch(es) waiting to {label}: {pending}" ) time.sleep(self.wait_interval * self._MIGRATION_SLEEP_FACTOR) - self.log.warning("Timeout waiting for switches to %s: %s", label, pending) + self.log.warning(f"Timeout waiting for switches to {label}: {pending}") return None # ===================================================================== @@ -738,17 +725,13 @@ def _wait_for_discovery_state( self._trigger_rediscovery(pending) self.log.debug( - "Attempt %s/%s: %s switch(es) not yet '%s': %s", - attempt, - self.max_attempts, - len(pending), - target_state, - pending, + f"Attempt {attempt}/{self.max_attempts}: {len(pending)} " + f"switch(es) not yet '{target_state}': {pending}" ) time.sleep(self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR) self.log.warning( - "Timeout waiting for '%s' state: %s", target_state, serial_numbers + f"Timeout waiting for '{target_state}' state: {serial_numbers}" ) return False @@ -800,17 +783,12 @@ def _wait_for_switches_in_fabric( return True self.log.debug( - "Attempt %s/%s: %s switch(es) not yet in fabric: %s", - attempt, - self.max_attempts, - len(pending), - pending, + f"Attempt {attempt}/{self.max_attempts}: {len(pending)} " + f"switch(es) not yet in fabric: {pending}" ) time.sleep(self.wait_interval) - self.log.warning( - "Timeout waiting for switches to appear in fabric: %s", pending - ) + self.log.warning(f"Timeout waiting for switches to appear in fabric: {pending}") return False def _fetch_switch_data( @@ -832,7 +810,7 @@ def _fetch_switch_data( return None return switch_data except Exception as e: - self.log.error("Failed to fetch switch data: %s", e) 
+ self.log.error(f"Failed to fetch switch data: {e}") return None def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: @@ -845,7 +823,7 @@ def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: return payload = {"switchIds": serial_numbers} - self.log.info("Triggering rediscovery for: %s", serial_numbers) + self.log.info(f"Triggering rediscovery for: {serial_numbers}") try: self.nd.request( self.ep_rediscover.path, @@ -853,7 +831,7 @@ def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: data=payload, ) except Exception as e: - self.log.warning("Failed to trigger rediscovery: %s", e) + self.log.warning(f"Failed to trigger rediscovery: {e}") def _get_discovery_status( self, @@ -877,7 +855,7 @@ def _get_discovery_status( return switch return None except Exception as e: - self.log.debug("Discovery status check failed: %s", e) + self.log.debug(f"Discovery status check failed: {e}") return None def _is_greenfield_debug_enabled(self) -> bool: @@ -900,10 +878,10 @@ def _is_greenfield_debug_enabled(self) -> bool: flag = ( fabric_info.get("management", {}).get("greenfieldDebugFlag", "").lower() ) - self.log.debug("Greenfield debug flag value: '%s'", flag) + self.log.debug(f"Greenfield debug flag value: '{flag}'") self._greenfield_debug_enabled = flag == "enable" except Exception as e: - self.log.debug("Failed to get greenfield debug flag: %s", e) + self.log.debug(f"Failed to get greenfield debug flag: {e}") self._greenfield_debug_enabled = False return self._greenfield_debug_enabled diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 593162f9..98686c65 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -492,14 +492,12 @@ def main(): sw_module.manage_state() # Exit with results - log.info( - "State management completed successfully. Changed: %s", results.changed - ) + log.info(f"State management completed successfully. 
Changed: {results.changed}") sw_module.exit_json() except NDModuleError as error: # NDModule-specific errors (API failures, authentication issues, etc.) - log.error("NDModule error: %s", error.msg) + log.error(f"NDModule error: {error.msg}") # Try to get response from RestSend if available try: @@ -525,13 +523,13 @@ def main(): if output_level == "debug": results.final_result["error_details"] = error.to_dict() - log.error("Module failed: %s", results.final_result) + log.error(f"Module failed: {results.final_result}") module.fail_json(msg=error.msg, **results.final_result) except Exception as error: # Unexpected errors - log.error("Unexpected error during module execution: %s", str(error)) - log.error("Error type: %s", error.__class__.__name__) + log.error(f"Unexpected error during module execution: {str(error)}") + log.error(f"Error type: {error.__class__.__name__}") # Build failed result results.response_current = { From ca3ee28377867975ec28b4291002b9c8dfdf21ae Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 30 Mar 2026 15:11:01 +0530 Subject: [PATCH 080/109] Sanity, Doc Fixes --- .../manage_switches/nd_switch_resources.py | 355 ++++++++---------- plugins/module_utils/manage_switches/utils.py | 72 ++-- .../models/manage_switches/config_models.py | 7 +- plugins/modules/nd_manage_switches.py | 19 +- 4 files changed, 196 insertions(+), 257 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index ce9af018..2d78cb6a 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -159,7 +159,7 @@ def validate_configs( log.debug("ENTER: validate_configs()") configs_list = config if isinstance(config, list) else [config] - log.debug(f"Normalized to {len(configs_list)} configuration(s)") + log.debug("Normalized to %s configuration(s)", len(configs_list)) validated_configs: List[SwitchConfigModel] = [] 
for idx, cfg in enumerate(configs_list): @@ -214,12 +214,10 @@ def validate_configs( operation_types = {c.operation_type for c in validated_configs} log.info( - f"Successfully validated {len(validated_configs)} " - f"configuration(s) with operation type(s): {operation_types}" + "Successfully validated %s configuration(s) with operation type(s): %s", len(validated_configs), operation_types ) log.debug( - f"EXIT: validate_configs() -> " - f"{len(validated_configs)} configs, operation_types={operation_types}" + "EXIT: validate_configs() -> %s configs, operation_types=%s", len(validated_configs), operation_types ) return validated_configs @@ -243,7 +241,7 @@ def compute_changes( """ log.debug("ENTER: compute_changes()") log.debug( - f"Comparing {len(proposed)} proposed vs {len(existing)} existing switches" + "Comparing %s proposed vs %s existing switches", len(proposed), len(existing) ) # Build indexes for O(1) lookups @@ -251,8 +249,7 @@ def compute_changes( existing_by_ip = {sw.fabric_management_ip: sw for sw in existing} log.debug( - f"Indexes built — existing_by_id: {list(existing_by_id.keys())}, " - f"existing_by_ip: {list(existing_by_ip.keys())}" + "Indexes built — existing_by_id: %s, existing_by_ip: %s", list(existing_by_id.keys()), list(existing_by_ip.keys()) ) # Only user-controllable fields populated by both discovery and @@ -291,21 +288,19 @@ def compute_changes( if not existing_sw: log.info( - f"Switch {ip} (id={sid}) not found in existing — marking to_add" - ) + "Switch %s (id=%s) not found in existing — marking to_add", ip, sid) changes["to_add"].append(prop_sw) continue log.debug( - f"Switch {ip} (id={sid}) found in existing with {match_key} match {existing_sw}" + "Switch %s (id=%s) found in existing with %s match %s", ip, sid, match_key, existing_sw ) log.debug( - f"Switch {ip} matched existing by {match_key} " - f"(existing_id={existing_sw.switch_id})" + "Switch %s matched existing by %s (existing_id=%s)", ip, match_key, existing_sw.switch_id ) if 
existing_sw.additional_data.system_mode == SystemMode.MIGRATION: - log.info(f"Switch {ip} ({existing_sw.switch_id}) is in Migration mode") + log.info("Switch %s (%s) is in Migration mode", ip, existing_sw.switch_id) changes["migration_mode"].append(prop_sw) continue @@ -317,7 +312,7 @@ def compute_changes( ) if prop_dict == existing_dict: - log.debug(f"Switch {ip} is idempotent — no changes needed") + log.debug("Switch %s is idempotent — no changes needed", ip) changes["idempotent"].append(prop_sw) else: diff_keys = { @@ -326,14 +321,12 @@ def compute_changes( if prop_dict.get(k) != existing_dict.get(k) } log.info( - f"Switch {ip} has differences — marking to_update. " - f"Changed fields: {diff_keys}" + "Switch %s has differences — marking to_update. Changed fields: %s", ip, diff_keys ) proposed_diff = {k: prop_dict.get(k) for k in diff_keys} existing_diff = {k: existing_dict.get(k) for k in diff_keys} log.debug( - f"Switch {ip} diff detail — proposed: {proposed_diff}, " - f"existing: {existing_diff}" + "Switch %s diff detail — proposed: %s, existing: %s", ip, proposed_diff, existing_diff ) changes["to_update"].append(prop_sw) @@ -342,18 +335,17 @@ def compute_changes( for existing_sw in existing: if existing_sw.switch_id not in proposed_ids: log.info( - f"Existing switch {existing_sw.fabric_management_ip} " - f"({existing_sw.switch_id}) not in proposed — marking to_delete" + "Existing switch %s (%s) not in proposed — marking to_delete", existing_sw.fabric_management_ip, existing_sw.switch_id ) changes["to_delete"].append(existing_sw) log.info( - f"Compute changes summary: " - f"to_add={len(changes['to_add'])}, " - f"to_update={len(changes['to_update'])}, " - f"to_delete={len(changes['to_delete'])}, " - f"migration_mode={len(changes['migration_mode'])}, " - f"idempotent={len(changes['idempotent'])}" + "Compute changes summary: to_add=%s, to_update=%s, to_delete=%s, migration_mode=%s, idempotent=%s", + len(changes["to_add"]), + len(changes["to_update"]), + 
len(changes["to_delete"]), + len(changes["migration_mode"]), + len(changes["idempotent"]), ) log.debug("EXIT: compute_changes()") return changes @@ -444,12 +436,10 @@ def validate_switch_api_fields( pulled.append("config_data (gateway + models)") if pulled: log.info( - f"{context} serial '{serial}': the following fields were not " - f"provided and will be sourced from the bootstrap API: " - f"{', '.join(pulled)}" + "%s serial '%s': the following fields were not provided and will be sourced from the bootstrap API: %s", context, serial, ', '.join(pulled) ) else: - log.debug(f"{context} field validation passed for serial '{serial}'") + log.debug("%s field validation passed for serial '%s'", context, serial) # ========================================================================= @@ -486,7 +476,7 @@ def discover( log = self.ctx.log log.debug("Step 1: Grouping switches by credentials") credential_groups = group_switches_by_credentials(switch_configs, log) - log.debug(f"Created {len(credential_groups)} credential group(s)") + log.debug("Created %s credential group(s)", len(credential_groups)) log.debug("Step 2: Bulk discovering switches") all_discovered: Dict[str, Dict[str, Any]] = {} @@ -495,7 +485,7 @@ def discover( password = switches[0].password log.debug( - f"Discovering group: {len(switches)} switches with username={username}" + "Discovering group: %s switches with username=%s", len(switches), username ) try: discovered_batch = self.bulk_discover( @@ -515,7 +505,7 @@ def discover( log.error(msg) self.ctx.nd.module.fail_json(msg=msg) - log.debug(f"Total discovered: {len(all_discovered)} switches") + log.debug("Total discovered: %s switches", len(all_discovered)) return all_discovered def bulk_discover( @@ -543,13 +533,13 @@ def bulk_discover( results = self.ctx.results log.debug("ENTER: bulk_discover()") - log.debug(f"Discovering {len(switches)} switches in bulk") + log.debug("Discovering %s switches in bulk", len(switches)) endpoint = 
EpManageFabricsActionsShallowDiscoveryPost() endpoint.fabric_name = self.ctx.fabric seed_ips = [switch.seed_ip for switch in switches] - log.debug(f"Seed IPs: {seed_ips}") + log.debug("Seed IPs: %s", seed_ips) max_hops = _DISCOVERY_MAX_HOPS @@ -563,9 +553,9 @@ def bulk_discover( ) payload = discovery_request.to_payload() - log.info(f"Bulk discovering {len(seed_ips)} switches: {', '.join(seed_ips)}") - log.debug(f"Discovery endpoint: {endpoint.path}") - log.debug(f"Discovery payload (password masked): {mask_password(payload)}") + log.info("Bulk discovering %s switches: %s", len(seed_ips), ', '.join(seed_ips)) + log.debug("Discovery endpoint: %s", endpoint.path) + log.debug("Discovery payload (password masked): %s", mask_password(payload)) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -591,7 +581,7 @@ def bulk_discover( switches_data = response.get("switches", []) log.debug( - f"Extracted {len(switches_data)} switches from discovery response" + "Extracted %s switches from discovery response", len(switches_data) ) discovered_results: Dict[str, Dict[str, Any]] = {} @@ -621,27 +611,26 @@ def bulk_discover( if status in ("manageable", "ok"): discovered_results[ip] = discovered log.info( - f"Switch {ip} ({serial_number}) discovered successfully - status: {status}" + "Switch %s (%s) discovered successfully - status: %s", ip, serial_number, status ) elif status == "alreadymanaged": - log.info(f"Switch {ip} ({serial_number}) is already managed") + log.info("Switch %s (%s) is already managed", ip, serial_number) discovered_results[ip] = discovered else: reason = discovered.get("statusReason", "Unknown") log.error( - f"Switch {ip} discovery failed - status: {status}, reason: {reason}" + "Switch %s discovery failed - status: %s, reason: %s", ip, status, reason ) for seed_ip in seed_ips: if seed_ip not in discovered_results: - log.warning(f"Switch {seed_ip} not found in discovery response") + log.warning("Switch %s not found in discovery response", 
seed_ip) log.info( - f"Bulk discovery completed: " - f"{len(discovered_results)}/{len(seed_ips)} switches successful" + "Bulk discovery completed: %s/%s switches successful", len(discovered_results), len(seed_ips) ) - log.debug(f"Discovered switches: {list(discovered_results.keys())}") - log.debug(f"EXIT: bulk_discover() -> {len(discovered_results)} discovered") + log.debug("Discovered switches: %s", list(discovered_results.keys())) + log.debug("EXIT: bulk_discover() -> %s discovered", len(discovered_results)) return discovered_results except Exception as e: @@ -676,7 +665,7 @@ def build_proposed( if cfg.role is not None: discovered = {**discovered, "role": cfg.role} proposed.append(SwitchDataModel.from_response(discovered)) - log.debug(f"Built proposed model from discovery for {seed_ip}") + log.debug("Built proposed model from discovery for %s", seed_ip) continue # Fallback: switch may already be in the fabric inventory @@ -694,8 +683,7 @@ def build_proposed( else: proposed.append(existing_match) log.debug( - f"Switch {seed_ip} already in fabric inventory — " - f"using existing record (discovery skipped)" + "Switch %s already in fabric inventory — using existing record (discovery skipped)", seed_ip ) continue @@ -757,7 +745,7 @@ def bulk_add( results = self.ctx.results log.debug("ENTER: bulk_add()") - log.debug(f"Adding {len(switches)} switches to fabric") + log.debug("Adding %s switches to fabric", len(switches)) endpoint = EpManageFabricsSwitchesPost() endpoint.fabric_name = self.ctx.fabric @@ -787,8 +775,7 @@ def bulk_add( ) switch_discoveries.append(switch_discovery) log.debug( - f"Prepared switch for add: " - f"{discovered.get('serialNumber')} ({discovered.get('hostname')})" + "Prepared switch for add: %s (%s)", discovered.get('serialNumber'), discovered.get('hostname') ) if not switch_discoveries: @@ -809,11 +796,10 @@ def bulk_add( payload = add_request.to_payload() serial_numbers = [d.get("serialNumber") for _cfg, d in switches] log.info( - f"Bulk adding 
{len(switches)} switches to fabric " - f"{self.ctx.fabric}: {', '.join(serial_numbers)}" + "Bulk adding %s switches to fabric %s: %s", len(switches), self.ctx.fabric, ', '.join(serial_numbers) ) - log.debug(f"Add endpoint: {endpoint.path}") - log.debug(f"Add payload (password masked): {mask_password(payload)}") + log.debug("Add endpoint: %s", endpoint.path) + log.debug("Add payload (password masked): %s", mask_password(payload)) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -884,7 +870,7 @@ def bulk_delete( ip = getattr(switch, "fabric_management_ip", None) or getattr( switch, "ip", None ) - log.warning(f"Cannot delete switch {ip}: no serial number/switch_id") + log.warning("Cannot delete switch %s: no serial number/switch_id", ip) if not serial_numbers: log.warning("No valid serial numbers found for deletion") @@ -896,11 +882,10 @@ def bulk_delete( payload = {"switchIds": serial_numbers} log.info( - f"Bulk removing {len(serial_numbers)} switch(es) from fabric " - f"{self.ctx.fabric}: {serial_numbers}" + "Bulk removing %s switch(es) from fabric %s: %s", len(serial_numbers), self.ctx.fabric, serial_numbers ) - log.debug(f"Delete endpoint: {endpoint.path}") - log.debug(f"Delete payload: {payload}") + log.debug("Delete endpoint: %s", endpoint.path) + log.debug("Delete payload: %s", payload) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -915,12 +900,12 @@ def bulk_delete( results.diff_current = {"deleted": serial_numbers} results.register_api_call() - log.info(f"Bulk delete submitted for {len(serial_numbers)} switch(es)") + log.info("Bulk delete submitted for %s switch(es)", len(serial_numbers)) log.debug("EXIT: bulk_delete()") return serial_numbers except Exception as e: - log.error(f"Bulk delete failed: {e}") + log.error("Bulk delete failed: %s", e) raise SwitchOperationError( f"Bulk delete failed for {serial_numbers}: {e}" ) from e @@ -947,7 +932,7 @@ def bulk_save_credentials( for sn, cfg in switch_actions: if 
not cfg.username or not cfg.password: log.debug( - f"Skipping credentials for {sn}: missing username or password" + "Skipping credentials for %s: missing username or password", sn ) continue key = (cfg.username, cfg.password) @@ -968,10 +953,10 @@ def bulk_save_credentials( payload = creds_request.to_payload() log.info( - f"Saving credentials for {len(serial_numbers)} switch(es): {serial_numbers}" + "Saving credentials for %s switch(es): %s", len(serial_numbers), serial_numbers ) - log.debug(f"Credentials endpoint: {endpoint.path}") - log.debug(f"Credentials payload (masked): {mask_password(payload)}") + log.debug("Credentials endpoint: %s", endpoint.path) + log.debug("Credentials payload (masked): %s", mask_password(payload)) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -988,7 +973,7 @@ def bulk_save_credentials( "username": username, } results.register_api_call() - log.info(f"Credentials saved for {len(serial_numbers)} switch(es)") + log.info("Credentials saved for %s switch(es)", len(serial_numbers)) except Exception as e: msg = ( f"Failed to save credentials for " f"switches {serial_numbers}: {e}" @@ -1032,9 +1017,9 @@ def bulk_update_roles( endpoint.fabric_name = self.ctx.fabric payload = {"switchRoles": switch_roles} - log.info(f"Bulk updating roles for {len(switch_roles)} switch(es)") - log.debug(f"ChangeRoles endpoint: {endpoint.path}") - log.debug(f"ChangeRoles payload: {payload}") + log.info("Bulk updating roles for %s switch(es)", len(switch_roles)) + log.debug("ChangeRoles endpoint: %s", endpoint.path) + log.debug("ChangeRoles payload: %s", payload) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -1048,7 +1033,7 @@ def bulk_update_roles( results.result_current = result results.diff_current = payload results.register_api_call() - log.info(f"Roles updated for {len(switch_roles)} switch(es)") + log.info("Roles updated for %s switch(es)", len(switch_roles)) except Exception as e: msg = f"Failed to bulk 
update roles for switches: {e}" log.error(msg) @@ -1103,8 +1088,7 @@ def post_add_processing( all_serials = [sn for sn, _cfg in switch_actions] log.info( - f"Waiting for {len(all_serials)} {context} " - f"switch(es) to become manageable: {all_serials}" + "Waiting for %s %s switch(es) to become manageable: %s", len(all_serials), context, all_serials ) wait_kwargs: Dict[str, Any] = {} @@ -1189,7 +1173,7 @@ def handle( results = self.ctx.results log.debug("ENTER: POAPHandler.handle()") - log.info(f"Processing POAP for {len(proposed_config)} switch config(s)") + log.info("Processing POAP for %s switch config(s)", len(proposed_config)) # Classify entries first so check mode can report per-operation counts bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] @@ -1229,13 +1213,11 @@ def handle( ] if poap_extra: log.warning( - f"Swap ({switch_cfg.seed_ip}): extra fields in 'poap' will be " - f"ignored during swap: {poap_extra}" + "Swap (%s): extra fields in 'poap' will be ignored during swap: %s", switch_cfg.seed_ip, poap_extra ) if preprov_extra: log.warning( - f"Swap ({switch_cfg.seed_ip}): extra fields in 'preprovision' will be " - f"ignored during swap: {preprov_extra}" + "Swap (%s): extra fields in 'preprovision' will be ignored during swap: %s", switch_cfg.seed_ip, preprov_extra ) swap_entries.append( (switch_cfg, switch_cfg.poap, switch_cfg.preprovision) @@ -1246,21 +1228,16 @@ def handle( bootstrap_entries.append((switch_cfg, switch_cfg.poap)) else: log.warning( - f"Switch config for {switch_cfg.seed_ip} has no poap or preprovision " - f"block — skipping" - ) + "Switch config for %s has no poap or preprovision block — skipping", switch_cfg.seed_ip) log.info( - f"POAP classification: {len(bootstrap_entries)} bootstrap, " - f"{len(preprov_entries)} pre-provision, " - f"{len(swap_entries)} swap" + "POAP classification: %s bootstrap, %s pre-provision, %s swap", len(bootstrap_entries), len(preprov_entries), len(swap_entries) ) # Check mode — preview 
only if nd.module.check_mode: log.info( - f"Check mode: would bootstrap {len(bootstrap_entries)}, " - f"pre-provision {len(preprov_entries)}, swap {len(swap_entries)}" + "Check mode: would bootstrap %s, pre-provision %s, swap %s", len(bootstrap_entries), len(preprov_entries), len(swap_entries) ) results.action = "poap" results.operation_type = OperationType.CREATE @@ -1290,9 +1267,7 @@ def handle( existing_sw.switch_id, ): log.info( - f"Bootstrap: IP '{switch_cfg.seed_ip}' with serial " - f"'{poap_cfg.serial_number}' already in fabric " - f"— idempotent, skipping" + "Bootstrap: IP '%s' with serial '%s' already in fabric — idempotent, skipping", switch_cfg.seed_ip, poap_cfg.serial_number ) else: active_bootstrap.append((switch_cfg, poap_cfg)) @@ -1302,8 +1277,7 @@ def handle( for switch_cfg, preprov_cfg in preprov_entries: if switch_cfg.seed_ip in existing_by_ip: log.info( - f"PreProvision: IP '{switch_cfg.seed_ip}' already in fabric " - f"— idempotent, skipping" + "PreProvision: IP '%s' already in fabric — idempotent, skipping", switch_cfg.seed_ip ) else: active_preprov.append((switch_cfg, preprov_cfg)) @@ -1324,9 +1298,7 @@ def handle( pp_model = self._build_preprovision_model(switch_cfg, preprov_cfg) preprov_models.append(pp_model) log.info( - f"Built pre-provision model for serial=" - f"{pp_model.serial_number}, hostname={pp_model.hostname}, " - f"ip={pp_model.ip}" + "Built pre-provision model for serial=%s, hostname=%s, ip=%s", pp_model.serial_number, pp_model.hostname, pp_model.ip ) if preprov_models: @@ -1361,13 +1333,12 @@ def _handle_poap_bootstrap( log = self.ctx.log log.debug("ENTER: _handle_poap_bootstrap()") - log.info(f"Processing {len(bootstrap_entries)} bootstrap entries") + log.info("Processing %s bootstrap entries", len(bootstrap_entries)) bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) bootstrap_idx = build_bootstrap_index(bootstrap_switches) log.debug( - f"Bootstrap index contains {len(bootstrap_idx)} switch(es): " - 
f"{list(bootstrap_idx.keys())}" + "Bootstrap index contains %s switch(es): %s", len(bootstrap_idx), list(bootstrap_idx.keys()) ) import_models: List[BootstrapImportSwitchModel] = [] @@ -1390,8 +1361,7 @@ def _handle_poap_bootstrap( ) import_models.append(model) log.info( - f"Built bootstrap model for serial={serial}, " - f"hostname={model.hostname}, ip={model.ip}" + "Built bootstrap model for serial=%s, hostname=%s, ip=%s", serial, model.hostname, model.ip ) if not import_models: @@ -1433,7 +1403,7 @@ def _build_bootstrap_import_model( """ log = self.ctx.log log.debug( - f"ENTER: _build_bootstrap_import_model(serial={poap_cfg.serial_number})" + "ENTER: _build_bootstrap_import_model(serial=%s)", poap_cfg.serial_number ) bs = bootstrap_data or {} @@ -1463,8 +1433,7 @@ def _build_bootstrap_import_model( api_hostname = bs.get("hostname", "") if api_hostname and api_hostname != user_hostname: log.warning( - f"Bootstrap ({serial_number}): API hostname '{api_hostname}' overrides " - f"user-provided hostname '{user_hostname}'. Using API value." + "Bootstrap (%s): API hostname '%s' overrides user-provided hostname '%s'. Using API value.", serial_number, api_hostname, user_hostname ) hostname = api_hostname else: @@ -1478,8 +1447,7 @@ def _build_bootstrap_import_model( api_role = SwitchRole.normalize(api_role_raw) if api_role and api_role != switch_role: log.warning( - f"Bootstrap ({serial_number}): API role '{api_role_raw}' overrides " - f"user-provided role '{switch_role}'. Using API value." + "Bootstrap (%s): API role '%s' overrides user-provided role '%s'. 
Using API value.", serial_number, api_role_raw, switch_role ) switch_role = api_role except Exception: @@ -1521,7 +1489,7 @@ def _build_bootstrap_import_model( ) log.debug( - f"EXIT: _build_bootstrap_import_model() -> {bootstrap_model.serial_number}" + "EXIT: _build_bootstrap_import_model() -> %s", bootstrap_model.serial_number ) return bootstrap_model @@ -1549,11 +1517,10 @@ def _import_bootstrap_switches( request_model = ImportBootstrapSwitchesRequestModel(switches=models) payload = request_model.to_payload() - log.debug(f"importBootstrap endpoint: {endpoint.path}") - log.debug(f"importBootstrap payload (masked): {mask_password(payload)}") + log.debug("importBootstrap endpoint: %s", endpoint.path) + log.debug("importBootstrap payload (masked): %s", mask_password(payload)) log.info( - f"Importing {len(models)} bootstrap switch(es): " - f"{[m.serial_number for m in models]}" + "Importing %s bootstrap switch(es): %s", len(models), [m.serial_number for m in models] ) try: @@ -1584,7 +1551,7 @@ def _import_bootstrap_switches( log.error(msg) nd.module.fail_json(msg=msg) - log.info(f"importBootstrap API response success: {result.get('success')}") + log.info("importBootstrap API response success: %s", result.get('success')) log.debug("EXIT: _import_bootstrap_switches()") def _build_preprovision_model( @@ -1603,7 +1570,7 @@ def _build_preprovision_model( """ log = self.ctx.log log.debug( - f"ENTER: _build_preprovision_model(serial={preprov_cfg.serial_number})" + "ENTER: _build_preprovision_model(serial=%s)", preprov_cfg.serial_number ) serial_number = preprov_cfg.serial_number @@ -1639,7 +1606,7 @@ def _build_preprovision_model( switchRole=switch_role, ) - log.debug(f"EXIT: _build_preprovision_model() -> {preprov_model.serial_number}") + log.debug("EXIT: _build_preprovision_model() -> %s", preprov_model.serial_number) return preprov_model def _preprovision_switches( @@ -1666,11 +1633,10 @@ def _preprovision_switches( request_model = 
PreProvisionSwitchesRequestModel(switches=models) payload = request_model.to_payload() - log.debug(f"preProvision endpoint: {endpoint.path}") - log.debug(f"preProvision payload (masked): {mask_password(payload)}") + log.debug("preProvision endpoint: %s", endpoint.path) + log.debug("preProvision payload (masked): %s", mask_password(payload)) log.info( - f"Pre-provisioning {len(models)} switch(es): " - f"{[m.serial_number for m in models]}" + "Pre-provisioning %s switch(es): %s", len(models), [m.serial_number for m in models] ) try: @@ -1701,7 +1667,7 @@ def _preprovision_switches( log.error(msg) nd.module.fail_json(msg=msg) - log.info(f"preProvision API response success: {result.get('success')}") + log.info("preProvision API response success: %s", result.get('success')) log.debug("EXIT: _preprovision_switches()") def _handle_poap_swap( @@ -1728,7 +1694,7 @@ def _handle_poap_swap( fabric = self.ctx.fabric log.debug("ENTER: _handle_poap_swap()") - log.info(f"Processing {len(swap_entries)} POAP swap entries") + log.info("Processing %s POAP swap entries", len(swap_entries)) # ------------------------------------------------------------------ # Step 1: Validate preprovision serials exist in fabric inventory @@ -1739,8 +1705,7 @@ def _handle_poap_swap( if sw.switch_id } log.debug( - f"Fabric inventory contains {len(fabric_index)} switch(es): " - f"{list(fabric_index.keys())}" + "Fabric inventory contains %s switch(es): %s", len(fabric_index), list(fabric_index.keys()) ) for switch_cfg, poap_cfg, preprov_cfg in swap_entries: @@ -1754,8 +1719,7 @@ def _handle_poap_swap( log.error(msg) nd.module.fail_json(msg=msg) log.info( - f"Validated: pre-provisioned serial '{old_serial}' exists " - f"in fabric inventory" + "Validated: pre-provisioned serial '%s' exists in fabric inventory", old_serial ) # ------------------------------------------------------------------ @@ -1764,8 +1728,7 @@ def _handle_poap_swap( bootstrap_switches = query_bootstrap_switches(nd, fabric, log) 
bootstrap_index = build_bootstrap_index(bootstrap_switches) log.debug( - f"Bootstrap list contains {len(bootstrap_index)} switch(es): " - f"{list(bootstrap_index.keys())}" + "Bootstrap list contains %s switch(es): %s", len(bootstrap_index), list(bootstrap_index.keys()) ) for switch_cfg, poap_cfg, preprov_cfg in swap_entries: @@ -1780,7 +1743,7 @@ def _handle_poap_swap( log.error(msg) nd.module.fail_json(msg=msg) log.info( - f"Validated: new serial '{new_serial}' exists in " f"bootstrap list" + "Validated: new serial '%s' exists in bootstrap list", new_serial ) # ------------------------------------------------------------------ @@ -1791,8 +1754,7 @@ def _handle_poap_swap( new_serial = poap_cfg.serial_number log.info( - f"Swapping serial for pre-provisioned switch: " - f"{old_serial} → {new_serial}" + "Swapping serial for pre-provisioned switch: %s → %s", old_serial, new_serial ) endpoint = EpManageFabricsSwitchChangeSerialNumberPost() @@ -1802,8 +1764,8 @@ def _handle_poap_swap( request_body = ChangeSwitchSerialNumberRequestModel(newSwitchId=new_serial) payload = request_body.to_payload() - log.debug(f"changeSwitchSerialNumber endpoint: {endpoint.path}") - log.debug(f"changeSwitchSerialNumber payload: {payload}") + log.debug("changeSwitchSerialNumber endpoint: %s", endpoint.path) + log.debug("changeSwitchSerialNumber payload: %s", payload) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -1836,15 +1798,14 @@ def _handle_poap_swap( log.error(msg) nd.module.fail_json(msg=msg) - log.info(f"Serial number swap successful: {old_serial} → {new_serial}") - + log.info("Serial number swap successful: %s → %s", old_serial, new_serial) # ------------------------------------------------------------------ # Step 4: Re-query bootstrap API for post-swap data # ------------------------------------------------------------------ post_swap_bootstrap = query_bootstrap_switches(nd, fabric, log) post_swap_index = build_bootstrap_index(post_swap_bootstrap) 
log.debug( - f"Post-swap bootstrap list contains " f"{len(post_swap_index)} switch(es)" + "Post-swap bootstrap list contains %s switch(es)", len(post_swap_index) ) # ------------------------------------------------------------------ @@ -1869,8 +1830,7 @@ def _handle_poap_swap( ) import_models.append(model) log.info( - f"Built bootstrap model for swapped serial={new_serial}, " - f"hostname={model.hostname}, ip={model.ip}" + "Built bootstrap model for swapped serial=%s, hostname=%s, ip=%s", new_serial, model.hostname, model.ip ) if not import_models: @@ -1900,8 +1860,7 @@ def _handle_poap_swap( ) log.info( - f"POAP swap completed successfully for {len(swap_entries)} " - f"switch(es): {[sn for sn, _cfg in switch_actions]}" + "POAP swap completed successfully for %s switch(es): %s", len(swap_entries), [sn for sn, _cfg in switch_actions] ) log.debug("EXIT: _handle_poap_swap()") @@ -1953,7 +1912,7 @@ def handle( results = self.ctx.results log.debug("ENTER: RMAHandler.handle()") - log.info(f"Processing RMA for {len(proposed_config)} switch config(s)") + log.info("Processing RMA for %s switch config(s)", len(proposed_config)) # Check mode — preview only if nd.module.check_mode: @@ -1973,7 +1932,7 @@ def handle( for switch_cfg in proposed_config: if not switch_cfg.rma: log.warning( - f"Switch config for {switch_cfg.seed_ip} has no RMA block — skipping" + "Switch config for %s has no RMA block — skipping", switch_cfg.seed_ip ) continue for rma_cfg in switch_cfg.rma: @@ -1989,7 +1948,7 @@ def handle( results.register_api_call() return - log.info(f"Found {len(rma_entries)} RMA entry/entries to process") + log.info("Found %s RMA entry/entries to process", len(rma_entries)) # Validate old switches exist and are in correct state old_switch_info = self._validate_prerequisites(rma_entries, existing) @@ -1998,8 +1957,7 @@ def handle( bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) bootstrap_idx = build_bootstrap_index(bootstrap_switches) log.debug( - 
f"Bootstrap index contains {len(bootstrap_idx)} switch(es): " - f"{list(bootstrap_idx.keys())}" + "Bootstrap index contains %s switch(es): %s", len(bootstrap_idx), list(bootstrap_idx.keys()) ) # Build and submit each RMA request @@ -2039,8 +1997,7 @@ def handle( old_switch_info[rma_cfg.old_serial_number], ) log.info( - f"Built RMA model: replacing {rma_cfg.old_serial_number} with " - f"{rma_model.new_switch_id}" + "Built RMA model: replacing %s with %s", rma_cfg.old_serial_number, rma_model.new_switch_id ) self._provision_rma_switch(rma_cfg.old_serial_number, rma_model) @@ -2057,8 +2014,7 @@ def handle( # migration-mode phase. all_new_serials = [sn for sn, _cfg in switch_actions] log.info( - f"Waiting for {len(all_new_serials)} RMA replacement " - f"switch(es) to become ready: {all_new_serials}" + "Waiting for %s RMA replacement switch(es) to become ready: %s", len(all_new_serials), all_new_serials ) success = self.wait_utils.wait_for_rma_switch_ready(all_new_serials) if not success: @@ -2171,9 +2127,11 @@ def _validate_prerequisites( "switch_data": old_switch, } log.info( - f"RMA prerequisite check passed for old_serial " - f"'{old_serial}' (hostname={old_switch.hostname}, " - f"discovery={ad.discovery_status}, mode={ad.system_mode})" + "RMA prerequisite check passed for old_serial '%s' (hostname=%s, discovery=%s, mode=%s)", + old_serial, + old_switch.hostname, + ad.discovery_status, + ad.system_mode, ) log.debug("EXIT: _validate_prerequisites()") @@ -2199,8 +2157,7 @@ def _build_rma_model( """ log = self.ctx.log log.debug( - f"ENTER: _build_rma_model(new={rma_cfg.new_serial_number}, " - f"old={rma_cfg.old_serial_number})" + "ENTER: _build_rma_model(new=%s, old=%s)", rma_cfg.new_serial_number, rma_cfg.old_serial_number ) # User config fields @@ -2256,7 +2213,7 @@ def _build_rma_model( ), ) - log.debug(f"EXIT: _build_rma_model() -> newSwitchId={rma_model.new_switch_id}") + log.debug("EXIT: _build_rma_model() -> newSwitchId=%s", rma_model.new_switch_id) return 
rma_model def _provision_rma_switch( @@ -2285,9 +2242,9 @@ def _provision_rma_switch( payload = rma_model.to_payload() - log.info(f"RMA: Replacing {old_switch_id} with {rma_model.new_switch_id}") - log.debug(f"RMA endpoint: {endpoint.path}") - log.debug(f"RMA payload (masked): {mask_password(payload)}") + log.info("RMA: Replacing %s with %s", old_switch_id, rma_model.new_switch_id) + log.debug("RMA endpoint: %s", endpoint.path) + log.debug("RMA payload (masked): %s", mask_password(payload)) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) @@ -2320,7 +2277,7 @@ def _provision_rma_switch( log.error(msg) nd.module.fail_json(msg=msg) - log.info(f"RMA provision API response success: {result.get('success')}") + log.info("RMA provision API response success: %s", result.get('success')) log.debug("EXIT: _provision_rma_switch()") @@ -2413,7 +2370,7 @@ def __init__( self.poap_handler = POAPHandler(self.ctx, self.fabric_ops, self.wait_utils) self.rma_handler = RMAHandler(self.ctx, self.fabric_ops, self.wait_utils) - log.info(f"Initialized NDSwitchResourceModule for fabric: {self.fabric}") + log.info("Initialized NDSwitchResourceModule for fabric: %s", self.fabric) def exit_json(self) -> None: """Finalize collected results and exit the Ansible module. @@ -2471,7 +2428,7 @@ def manage_state(self) -> None: Returns: None. 
""" - self.log.info(f"Managing state: {self.state}") + self.log.info("Managing state: %s", self.state) # gathered — read-only, no config accepted if self.state == "gathered": @@ -2518,8 +2475,10 @@ def manage_state(self) -> None: self.output.assign(proposed=output_proposed) self.log.info( - f"Config partition: {len(normal_configs)} normal, " - f"{len(poap_configs)} poap, {len(rma_configs)} rma" + "Config partition: %s normal, %s poap, %s rma", + len(normal_configs), + len(poap_configs), + len(rma_configs), ) # POAP and RMA are only valid with state=merged @@ -2537,9 +2496,10 @@ def manage_state(self) -> None: ] if configs_to_discover: self.log.info( - f"Discovery needed for {len(configs_to_discover)}/" - f"{len(normal_configs)} switch(es) — " - f"{len(normal_configs) - len(configs_to_discover)} already in fabric" + "Discovery needed for %s/%s switch(es) — %s already in fabric", + len(configs_to_discover), + len(normal_configs), + len(normal_configs) - len(configs_to_discover), ) discovered_data = self.discovery.discover(configs_to_discover) else: @@ -2592,8 +2552,8 @@ def _handle_merged_state( """ self.log.debug("ENTER: _handle_merged_state()") self.log.info("Handling merged state") - self.log.debug(f"Proposed configs: {len(self.proposed)}") - self.log.debug(f"Existing switches: {len(self.existing)}") + self.log.debug("Proposed configs: %s", len(self.proposed)) + self.log.debug("Existing switches: %s", len(self.existing)) if not self.proposed: self.log.info("No configurations provided for merged state") @@ -2619,9 +2579,10 @@ def _handle_merged_state( # Check mode — preview only if self.nd.module.check_mode: self.log.info( - f"Check mode: would add {len(switches_to_add)}, process " - f"{len(migration_switches)} migration switch(es), " - f"save_deploy_required={idempotent_save_req}" + "Check mode: would add %s, process %s migration switch(es), save_deploy_required=%s", + len(switches_to_add), + len(migration_switches), + idempotent_save_req, ) self.results.action = 
"merge" self.results.state = self.state @@ -2654,7 +2615,7 @@ def _handle_merged_state( add_configs.append(cfg) else: self.log.warning( - f"No config found for switch {sw.fabric_management_ip}, skipping add" + "No config found for switch %s, skipping add", sw.fabric_management_ip ) if add_configs: @@ -2676,7 +2637,7 @@ def _handle_merged_state( pairs.append((cfg, disc)) else: self.log.warning( - f"No discovery data for {cfg.seed_ip}, skipping" + "No discovery data for %s, skipping", cfg.seed_ip ) if not pairs: @@ -2777,10 +2738,10 @@ def _merged_handle_idempotent( ) if status != ConfigSyncStatus.IN_SYNC: self.log.info( - f"Switch {sw.fabric_management_ip} ({sw.switch_id}) is " - f"config-idempotent but configSyncStatus is " - f"'{getattr(status, 'value', status) if status else 'unknown'}' — " - f"will run config save and deploy" + "Switch %s (%s) is config-idempotent but configSyncStatus is '%s' — will run config save and deploy", + sw.fabric_management_ip, + sw.switch_id, + getattr(status, "value", status) if status else "unknown", ) return True @@ -2844,8 +2805,7 @@ def _handle_overridden_state( n_add = len(diff.get("to_add", [])) n_migrate = len(diff.get("migration_mode", [])) self.log.info( - f"Check mode: would delete {n_delete}, delete-and-re-add " - f"{n_update}, add {n_add}, migrate {n_migrate}" + "Check mode: would delete %s, delete-and-re-add %s, add %s, migrate %s", n_delete, n_update, n_add, n_migrate ) self.results.action = "override" self.results.state = self.state @@ -2869,8 +2829,7 @@ def _handle_overridden_state( # Phase 1: Switches not in proposed config for sw in diff.get("to_delete", []): self.log.info( - f"Marking for deletion (not in proposed): " - f"{sw.fabric_management_ip} ({sw.switch_id})" + "Marking for deletion (not in proposed): %s (%s)", sw.fabric_management_ip, sw.switch_id ) switches_to_delete.append(sw) self._log_operation("delete", sw.fabric_management_ip) @@ -2888,8 +2847,7 @@ def _handle_overridden_state( ) if existing_sw: 
self.log.info( - f"Marking for deletion (re-add update): " - f"{existing_sw.fabric_management_ip} ({existing_sw.switch_id})" + "Marking for deletion (re-add update): %s (%s)", existing_sw.fabric_management_ip, existing_sw.switch_id ) switches_to_delete.append(existing_sw) self._log_operation( @@ -2919,9 +2877,9 @@ def _handle_overridden_state( ] if configs_needing_rediscovery: self.log.info( - f"Re-discovering {len(configs_needing_rediscovery)} switch(es) " - f"after deletion for re-add: " - f"{[cfg.seed_ip for cfg in configs_needing_rediscovery]}" + "Re-discovering %s switch(es) after deletion for re-add: %s", + len(configs_needing_rediscovery), + [cfg.seed_ip for cfg in configs_needing_rediscovery], ) fresh_discovered = self.discovery.discover(configs_needing_rediscovery) discovered_data = {**(discovered_data or {}), **fresh_discovered} @@ -2941,10 +2899,10 @@ def _handle_gathered_state(self) -> None: None. """ self.log.debug("ENTER: _handle_gathered_state()") - self.log.info(f"Gathering inventory for fabric '{self.fabric}'") + self.log.info("Gathering inventory for fabric '%s'", self.fabric) if not self.existing: - self.log.info(f"Fabric '{self.fabric}' has no switches in inventory") + self.log.info("Fabric '%s' has no switches in inventory", self.fabric) self.results.action = "gathered" self.results.state = self.state @@ -2955,7 +2913,7 @@ def _handle_gathered_state(self) -> None: self.results.register_api_call() self.log.info( - f"Gathered {len(list(self.existing))} switch(es) from fabric '{self.fabric}'" + "Gathered %s switch(es) from fabric '%s'", len(list(self.existing)), self.fabric ) self.log.debug("EXIT: _handle_gathered_state()") @@ -2977,8 +2935,7 @@ def _handle_deleted_state( if proposed_config is None: switches_to_delete = list(self.existing) self.log.info( - f"No proposed config — targeting all {len(switches_to_delete)} " - f"existing switch(es) for deletion" + "No proposed config — targeting all %s existing switch(es) for deletion", 
len(switches_to_delete) ) for sw in switches_to_delete: self._log_operation("delete", sw.fabric_management_ip) @@ -2987,7 +2944,7 @@ def _handle_deleted_state( for switch_config in proposed_config: identifier = switch_config.seed_ip self.log.debug( - f"Looking for switch to delete with seed IP: {identifier}" + "Looking for switch to delete with seed IP: %s", identifier ) existing_switch = next( ( @@ -2999,13 +2956,13 @@ def _handle_deleted_state( ) if existing_switch: self.log.info( - f"Marking for deletion: {identifier} ({existing_switch.switch_id})" + "Marking for deletion: %s (%s)", identifier, existing_switch.switch_id ) switches_to_delete.append(existing_switch) else: - self.log.info(f"Switch not found for deletion: {identifier}") + self.log.info("Switch not found for deletion: %s", identifier) - self.log.info(f"Total switches marked for deletion: {len(switches_to_delete)}") + self.log.info("Total switches marked for deletion: %s", len(switches_to_delete)) if not switches_to_delete: self.log.info("No switches to delete") return @@ -3013,7 +2970,7 @@ def _handle_deleted_state( # Check mode — preview only if self.nd.module.check_mode: self.log.info( - f"Check mode: would delete {len(switches_to_delete)} switch(es)" + "Check mode: would delete %s switch(es)", len(switches_to_delete) ) self.results.action = "delete" self.results.state = self.state @@ -3030,7 +2987,7 @@ def _handle_deleted_state( return self.log.info( - f"Proceeding to delete {len(switches_to_delete)} switch(es) from fabric" + "Proceeding to delete %s switch(es) from fabric", len(switches_to_delete) ) self.fabric_ops.bulk_delete(switches_to_delete) for sw in switches_to_delete: @@ -3049,8 +3006,8 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: """ endpoint = EpManageFabricsSwitchesGet() endpoint.fabric_name = self.fabric - self.log.debug(f"Querying all switches with endpoint: {endpoint.path}") - self.log.debug(f"Query verb: {endpoint.verb}") + self.log.debug("Querying all switches with 
endpoint: %s", endpoint.path) + self.log.debug("Query verb: %s", endpoint.verb) try: result = self.nd.request(path=endpoint.path, verb=endpoint.verb) @@ -3066,7 +3023,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: else: switches = [] - self.log.debug(f"Queried {len(switches)} switches from fabric {self.fabric}") + self.log.debug("Queried %s switches from fabric %s", len(switches), self.fabric) return switches # ===================================================================== diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py index 6447a07d..e021fda0 100644 --- a/plugins/module_utils/manage_switches/utils.py +++ b/plugins/module_utils/manage_switches/utils.py @@ -199,7 +199,7 @@ def group_switches_by_credentials( ) groups.setdefault(group_key, []).append(switch) - log.info(f"Grouped {len(switches)} switches into {len(groups)} credential group(s)") + log.info("Grouped %s switches into %s credential group(s)", len(switches), len(groups)) for idx, (key, group_switches) in enumerate(groups.items(), 1): username, _pw_hash, auth_proto, platform_type, preserve_config = key @@ -212,10 +212,13 @@ def group_switches_by_credentials( else str(platform_type) ) log.debug( - f"Group {idx}: {len(group_switches)} switches with " - f"username={username}, auth={auth_value}, " - f"platform={platform_value}, " - f"preserve_config={preserve_config}" + "Group %s: %s switches with username=%s, auth=%s, platform=%s, preserve_config=%s", + idx, + len(group_switches), + username, + auth_value, + platform_value, + preserve_config, ) return groups @@ -245,7 +248,7 @@ def query_bootstrap_switches( endpoint = EpManageFabricsBootstrapGet() endpoint.fabric_name = fabric - log.debug(f"Bootstrap endpoint: {endpoint.path}") + log.debug("Bootstrap endpoint: %s", endpoint.path) try: result = nd.request( @@ -264,7 +267,7 @@ def query_bootstrap_switches( else: switches = [] - log.info(f"Bootstrap API returned {len(switches)} 
switch(es) in POAP loop") + log.info("Bootstrap API returned %s switch(es) in POAP loop", len(switches)) log.debug("EXIT: query_bootstrap_switches()") return switches @@ -423,7 +426,7 @@ def wait_for_switch_manageable( Returns: ``True`` if all switches are manageable, ``False`` on timeout. """ - self.log.info(f"Waiting for switches to become manageable: {serial_numbers}") + self.log.info("Waiting for switches to become manageable: %s", serial_numbers) # Phase 1 + 2: migration → normal if not self._wait_for_system_mode(serial_numbers): @@ -476,8 +479,7 @@ def wait_for_rma_switch_ready( ``True`` if all switches reach ``ok`` status, ``False`` on timeout. """ self.log.info( - f"Waiting for RMA replacement switch(es) to become ready " - f"(skipping migration-mode phase): {serial_numbers}" + "Waiting for RMA replacement switch(es) to become ready (skipping migration-mode phase): %s", serial_numbers ) # Phase 1: wait until all new serials appear in the fabric inventory. @@ -507,23 +509,23 @@ def wait_for_discovery( attempts = max_attempts or 30 interval = wait_interval or self.wait_interval - self.log.info(f"Waiting for discovery of: {seed_ip}") + self.log.info("Waiting for discovery of: %s", seed_ip) for attempt in range(attempts): status = self._get_discovery_status(seed_ip) if status and status.get("status") in self.MANAGEABLE_STATUSES: - self.log.info(f"Discovery completed for {seed_ip}") + self.log.info("Discovery completed for %s", seed_ip) return status if status and status.get("status") in self.FAILED_STATUSES: - self.log.error(f"Discovery failed for {seed_ip}: {status}") + self.log.error("Discovery failed for %s: %s", seed_ip, status) return None - self.log.debug(f"Discovery attempt {attempt + 1}/{attempts} for {seed_ip}") + self.log.debug("Discovery attempt %s/%s for %s", attempt + 1, attempts, seed_ip) time.sleep(interval) - self.log.warning(f"Discovery timeout for {seed_ip}") + self.log.warning("Discovery timeout for %s", seed_ip) return None # 
===================================================================== @@ -597,17 +599,16 @@ def _poll_system_mode( ) if not remaining: - self.log.info(f"All switches {label} mode (attempt {attempt})") + self.log.info("All switches %s mode (attempt %s)", label, attempt) return remaining pending = remaining self.log.debug( - f"Attempt {attempt}/{self.max_attempts}: {len(pending)} " - f"switch(es) waiting to {label}: {pending}" + "Attempt %s/%s: %s switch(es) waiting to %s: %s", attempt, self.max_attempts, len(pending), label, pending ) time.sleep(self.wait_interval * self._MIGRATION_SLEEP_FACTOR) - self.log.warning(f"Timeout waiting for switches to {label}: {pending}") + self.log.warning("Timeout waiting for switches to %s: %s", label, pending) return None # ===================================================================== @@ -718,20 +719,18 @@ def _wait_for_discovery_state( if not pending: self.log.info( - f"All switches reached '{target_state}' state " - f"(attempt {attempt})" + "All switches reached '%s' state (attempt %s)", target_state, attempt ) return True self._trigger_rediscovery(pending) self.log.debug( - f"Attempt {attempt}/{self.max_attempts}: {len(pending)} " - f"switch(es) not yet '{target_state}': {pending}" + "Attempt %s/%s: %s switch(es) not yet '%s': %s", attempt, self.max_attempts, len(pending), target_state, pending ) time.sleep(self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR) self.log.warning( - f"Timeout waiting for '{target_state}' state: {serial_numbers}" + "Timeout waiting for '%s' state: %s", target_state, serial_numbers ) return False @@ -758,8 +757,7 @@ def _wait_for_switches_in_fabric( """ pending = list(serial_numbers) self.log.info( - f"Waiting for {len(pending)} switch(es) to appear in " - f"fabric inventory: {pending}" + "Waiting for %s switch(es) to appear in fabric inventory: %s", len(pending), pending ) for attempt in range(1, self.max_attempts + 1): @@ -777,18 +775,16 @@ def _wait_for_switches_in_fabric( if not pending: 
self.log.info( - f"All RMA switch(es) now visible in fabric inventory " - f"(attempt {attempt})" + "All RMA switch(es) now visible in fabric inventory (attempt %s)", attempt ) return True self.log.debug( - f"Attempt {attempt}/{self.max_attempts}: {len(pending)} " - f"switch(es) not yet in fabric: {pending}" + "Attempt %s/%s: %s switch(es) not yet in fabric: %s", attempt, self.max_attempts, len(pending), pending ) time.sleep(self.wait_interval) - self.log.warning(f"Timeout waiting for switches to appear in fabric: {pending}") + self.log.warning("Timeout waiting for switches to appear in fabric: %s", pending) return False def _fetch_switch_data( @@ -810,7 +806,7 @@ def _fetch_switch_data( return None return switch_data except Exception as e: - self.log.error(f"Failed to fetch switch data: {e}") + self.log.error("Failed to fetch switch data: %s", e) return None def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: @@ -823,7 +819,7 @@ def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: return payload = {"switchIds": serial_numbers} - self.log.info(f"Triggering rediscovery for: {serial_numbers}") + self.log.info("Triggering rediscovery for: %s", serial_numbers) try: self.nd.request( self.ep_rediscover.path, @@ -831,7 +827,7 @@ def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: data=payload, ) except Exception as e: - self.log.warning(f"Failed to trigger rediscovery: {e}") + self.log.warning("Failed to trigger rediscovery: %s", e) def _get_discovery_status( self, @@ -855,7 +851,7 @@ def _get_discovery_status( return switch return None except Exception as e: - self.log.debug(f"Discovery status check failed: {e}") + self.log.debug("Discovery status check failed: %s", e) return None def _is_greenfield_debug_enabled(self) -> bool: @@ -873,15 +869,15 @@ def _is_greenfield_debug_enabled(self) -> bool: try: fabric_info = self.fabric_utils.get_fabric_info() self.log.debug( - f"Fabric info retrieved for greenfield check: " 
f"{fabric_info}" + "Fabric info retrieved for greenfield check: %s", fabric_info ) flag = ( fabric_info.get("management", {}).get("greenfieldDebugFlag", "").lower() ) - self.log.debug(f"Greenfield debug flag value: '{flag}'") + self.log.debug("Greenfield debug flag value: '%s'", flag) self._greenfield_debug_enabled = flag == "enable" except Exception as e: - self.log.debug(f"Failed to get greenfield debug flag: {e}") + self.log.debug("Failed to get greenfield debug flag: %s", e) self._greenfield_debug_enabled = False return self._greenfield_debug_enabled diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 5ae2c393..87a21f98 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -698,7 +698,7 @@ def get_argument_spec(cls) -> Dict[str, Any]: elements="dict", options=dict( seed_ip=dict(type="str", required=True), - username=dict(type="str", default="admin"), + username=dict(type="str"), password=dict(type="str", no_log=True), auth_proto=dict( type="str", @@ -733,11 +733,6 @@ def get_argument_spec(cls) -> Dict[str, Any]: ], ), preserve_config=dict(type="bool", default=False), - platform_type=dict( - type="str", - default="nx-os", - choices=["nx-os", "ios-xe"], - ), poap=dict( type="dict", options=dict( diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 98686c65..34ae5f38 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -74,7 +74,6 @@ - Login username for the switch. - For POAP and RMA, should be C(admin). type: str - default: admin password: description: - Login password for the switch. @@ -104,14 +103,6 @@ - Set to C(false) for greenfield deployment, C(true) for brownfield. type: bool default: false - platform_type: - description: - - Platform type of the switch. 
- type: str - default: nx-os - choices: - - nx-os - - ios-xe poap: description: - Bootstrap POAP config for the switch. @@ -492,12 +483,12 @@ def main(): sw_module.manage_state() # Exit with results - log.info(f"State management completed successfully. Changed: {results.changed}") + log.info("State management completed successfully. Changed: %s", results.changed) sw_module.exit_json() except NDModuleError as error: # NDModule-specific errors (API failures, authentication issues, etc.) - log.error(f"NDModule error: {error.msg}") + log.error("NDModule error: %s", error.msg) # Try to get response from RestSend if available try: @@ -523,13 +514,13 @@ def main(): if output_level == "debug": results.final_result["error_details"] = error.to_dict() - log.error(f"Module failed: {results.final_result}") + log.error("Module failed: %s", results.final_result) module.fail_json(msg=error.msg, **results.final_result) except Exception as error: # Unexpected errors - log.error(f"Unexpected error during module execution: {str(error)}") - log.error(f"Error type: {error.__class__.__name__}") + log.error("Unexpected error during module execution: %s", str(error)) + log.error("Error type: %s", error.__class__.__name__) # Build failed result results.response_current = { From 9bb268bded8b5ec12f674393111d78ce2ec93587 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 30 Mar 2026 15:12:33 +0530 Subject: [PATCH 081/109] Black Formatting R3 --- .../manage_switches/nd_switch_resources.py | 244 +++++++++++++----- plugins/module_utils/manage_switches/utils.py | 46 +++- plugins/modules/nd_manage_switches.py | 4 +- 3 files changed, 221 insertions(+), 73 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 2d78cb6a..ec6caf84 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -214,10 +214,14 @@ def 
validate_configs( operation_types = {c.operation_type for c in validated_configs} log.info( - "Successfully validated %s configuration(s) with operation type(s): %s", len(validated_configs), operation_types + "Successfully validated %s configuration(s) with operation type(s): %s", + len(validated_configs), + operation_types, ) log.debug( - "EXIT: validate_configs() -> %s configs, operation_types=%s", len(validated_configs), operation_types + "EXIT: validate_configs() -> %s configs, operation_types=%s", + len(validated_configs), + operation_types, ) return validated_configs @@ -241,7 +245,9 @@ def compute_changes( """ log.debug("ENTER: compute_changes()") log.debug( - "Comparing %s proposed vs %s existing switches", len(proposed), len(existing) + "Comparing %s proposed vs %s existing switches", + len(proposed), + len(existing), ) # Build indexes for O(1) lookups @@ -249,7 +255,9 @@ def compute_changes( existing_by_ip = {sw.fabric_management_ip: sw for sw in existing} log.debug( - "Indexes built — existing_by_id: %s, existing_by_ip: %s", list(existing_by_id.keys()), list(existing_by_ip.keys()) + "Indexes built — existing_by_id: %s, existing_by_ip: %s", + list(existing_by_id.keys()), + list(existing_by_ip.keys()), ) # Only user-controllable fields populated by both discovery and @@ -288,19 +296,29 @@ def compute_changes( if not existing_sw: log.info( - "Switch %s (id=%s) not found in existing — marking to_add", ip, sid) + "Switch %s (id=%s) not found in existing — marking to_add", ip, sid + ) changes["to_add"].append(prop_sw) continue log.debug( - "Switch %s (id=%s) found in existing with %s match %s", ip, sid, match_key, existing_sw + "Switch %s (id=%s) found in existing with %s match %s", + ip, + sid, + match_key, + existing_sw, ) log.debug( - "Switch %s matched existing by %s (existing_id=%s)", ip, match_key, existing_sw.switch_id + "Switch %s matched existing by %s (existing_id=%s)", + ip, + match_key, + existing_sw.switch_id, ) if 
existing_sw.additional_data.system_mode == SystemMode.MIGRATION: - log.info("Switch %s (%s) is in Migration mode", ip, existing_sw.switch_id) + log.info( + "Switch %s (%s) is in Migration mode", ip, existing_sw.switch_id + ) changes["migration_mode"].append(prop_sw) continue @@ -321,12 +339,17 @@ def compute_changes( if prop_dict.get(k) != existing_dict.get(k) } log.info( - "Switch %s has differences — marking to_update. Changed fields: %s", ip, diff_keys + "Switch %s has differences — marking to_update. Changed fields: %s", + ip, + diff_keys, ) proposed_diff = {k: prop_dict.get(k) for k in diff_keys} existing_diff = {k: existing_dict.get(k) for k in diff_keys} log.debug( - "Switch %s diff detail — proposed: %s, existing: %s", ip, proposed_diff, existing_diff + "Switch %s diff detail — proposed: %s, existing: %s", + ip, + proposed_diff, + existing_diff, ) changes["to_update"].append(prop_sw) @@ -335,7 +358,9 @@ def compute_changes( for existing_sw in existing: if existing_sw.switch_id not in proposed_ids: log.info( - "Existing switch %s (%s) not in proposed — marking to_delete", existing_sw.fabric_management_ip, existing_sw.switch_id + "Existing switch %s (%s) not in proposed — marking to_delete", + existing_sw.fabric_management_ip, + existing_sw.switch_id, ) changes["to_delete"].append(existing_sw) @@ -436,7 +461,10 @@ def validate_switch_api_fields( pulled.append("config_data (gateway + models)") if pulled: log.info( - "%s serial '%s': the following fields were not provided and will be sourced from the bootstrap API: %s", context, serial, ', '.join(pulled) + "%s serial '%s': the following fields were not provided and will be sourced from the bootstrap API: %s", + context, + serial, + ", ".join(pulled), ) else: log.debug("%s field validation passed for serial '%s'", context, serial) @@ -485,7 +513,9 @@ def discover( password = switches[0].password log.debug( - "Discovering group: %s switches with username=%s", len(switches), username + "Discovering group: %s 
switches with username=%s", + len(switches), + username, ) try: discovered_batch = self.bulk_discover( @@ -553,7 +583,7 @@ def bulk_discover( ) payload = discovery_request.to_payload() - log.info("Bulk discovering %s switches: %s", len(seed_ips), ', '.join(seed_ips)) + log.info("Bulk discovering %s switches: %s", len(seed_ips), ", ".join(seed_ips)) log.debug("Discovery endpoint: %s", endpoint.path) log.debug("Discovery payload (password masked): %s", mask_password(payload)) @@ -611,7 +641,10 @@ def bulk_discover( if status in ("manageable", "ok"): discovered_results[ip] = discovered log.info( - "Switch %s (%s) discovered successfully - status: %s", ip, serial_number, status + "Switch %s (%s) discovered successfully - status: %s", + ip, + serial_number, + status, ) elif status == "alreadymanaged": log.info("Switch %s (%s) is already managed", ip, serial_number) @@ -619,7 +652,10 @@ def bulk_discover( else: reason = discovered.get("statusReason", "Unknown") log.error( - "Switch %s discovery failed - status: %s, reason: %s", ip, status, reason + "Switch %s discovery failed - status: %s, reason: %s", + ip, + status, + reason, ) for seed_ip in seed_ips: @@ -627,7 +663,9 @@ def bulk_discover( log.warning("Switch %s not found in discovery response", seed_ip) log.info( - "Bulk discovery completed: %s/%s switches successful", len(discovered_results), len(seed_ips) + "Bulk discovery completed: %s/%s switches successful", + len(discovered_results), + len(seed_ips), ) log.debug("Discovered switches: %s", list(discovered_results.keys())) log.debug("EXIT: bulk_discover() -> %s discovered", len(discovered_results)) @@ -683,7 +721,8 @@ def build_proposed( else: proposed.append(existing_match) log.debug( - "Switch %s already in fabric inventory — using existing record (discovery skipped)", seed_ip + "Switch %s already in fabric inventory — using existing record (discovery skipped)", + seed_ip, ) continue @@ -775,7 +814,9 @@ def bulk_add( ) 
switch_discoveries.append(switch_discovery) log.debug( - "Prepared switch for add: %s (%s)", discovered.get('serialNumber'), discovered.get('hostname') + "Prepared switch for add: %s (%s)", + discovered.get("serialNumber"), + discovered.get("hostname"), ) if not switch_discoveries: @@ -796,7 +837,10 @@ def bulk_add( payload = add_request.to_payload() serial_numbers = [d.get("serialNumber") for _cfg, d in switches] log.info( - "Bulk adding %s switches to fabric %s: %s", len(switches), self.ctx.fabric, ', '.join(serial_numbers) + "Bulk adding %s switches to fabric %s: %s", + len(switches), + self.ctx.fabric, + ", ".join(serial_numbers), ) log.debug("Add endpoint: %s", endpoint.path) log.debug("Add payload (password masked): %s", mask_password(payload)) @@ -882,7 +926,10 @@ def bulk_delete( payload = {"switchIds": serial_numbers} log.info( - "Bulk removing %s switch(es) from fabric %s: %s", len(serial_numbers), self.ctx.fabric, serial_numbers + "Bulk removing %s switch(es) from fabric %s: %s", + len(serial_numbers), + self.ctx.fabric, + serial_numbers, ) log.debug("Delete endpoint: %s", endpoint.path) log.debug("Delete payload: %s", payload) @@ -953,7 +1000,9 @@ def bulk_save_credentials( payload = creds_request.to_payload() log.info( - "Saving credentials for %s switch(es): %s", len(serial_numbers), serial_numbers + "Saving credentials for %s switch(es): %s", + len(serial_numbers), + serial_numbers, ) log.debug("Credentials endpoint: %s", endpoint.path) log.debug("Credentials payload (masked): %s", mask_password(payload)) @@ -1088,7 +1137,10 @@ def post_add_processing( all_serials = [sn for sn, _cfg in switch_actions] log.info( - "Waiting for %s %s switch(es) to become manageable: %s", len(all_serials), context, all_serials + "Waiting for %s %s switch(es) to become manageable: %s", + len(all_serials), + context, + all_serials, ) wait_kwargs: Dict[str, Any] = {} @@ -1213,11 +1265,15 @@ def handle( ] if poap_extra: log.warning( - "Swap (%s): extra fields in 'poap' will 
be ignored during swap: %s", switch_cfg.seed_ip, poap_extra + "Swap (%s): extra fields in 'poap' will be ignored during swap: %s", + switch_cfg.seed_ip, + poap_extra, ) if preprov_extra: log.warning( - "Swap (%s): extra fields in 'preprovision' will be ignored during swap: %s", switch_cfg.seed_ip, preprov_extra + "Swap (%s): extra fields in 'preprovision' will be ignored during swap: %s", + switch_cfg.seed_ip, + preprov_extra, ) swap_entries.append( (switch_cfg, switch_cfg.poap, switch_cfg.preprovision) @@ -1228,16 +1284,24 @@ def handle( bootstrap_entries.append((switch_cfg, switch_cfg.poap)) else: log.warning( - "Switch config for %s has no poap or preprovision block — skipping", switch_cfg.seed_ip) + "Switch config for %s has no poap or preprovision block — skipping", + switch_cfg.seed_ip, + ) log.info( - "POAP classification: %s bootstrap, %s pre-provision, %s swap", len(bootstrap_entries), len(preprov_entries), len(swap_entries) + "POAP classification: %s bootstrap, %s pre-provision, %s swap", + len(bootstrap_entries), + len(preprov_entries), + len(swap_entries), ) # Check mode — preview only if nd.module.check_mode: log.info( - "Check mode: would bootstrap %s, pre-provision %s, swap %s", len(bootstrap_entries), len(preprov_entries), len(swap_entries) + "Check mode: would bootstrap %s, pre-provision %s, swap %s", + len(bootstrap_entries), + len(preprov_entries), + len(swap_entries), ) results.action = "poap" results.operation_type = OperationType.CREATE @@ -1267,7 +1331,9 @@ def handle( existing_sw.switch_id, ): log.info( - "Bootstrap: IP '%s' with serial '%s' already in fabric — idempotent, skipping", switch_cfg.seed_ip, poap_cfg.serial_number + "Bootstrap: IP '%s' with serial '%s' already in fabric — idempotent, skipping", + switch_cfg.seed_ip, + poap_cfg.serial_number, ) else: active_bootstrap.append((switch_cfg, poap_cfg)) @@ -1277,7 +1343,8 @@ def handle( for switch_cfg, preprov_cfg in preprov_entries: if switch_cfg.seed_ip in existing_by_ip: log.info( - 
"PreProvision: IP '%s' already in fabric — idempotent, skipping", switch_cfg.seed_ip + "PreProvision: IP '%s' already in fabric — idempotent, skipping", + switch_cfg.seed_ip, ) else: active_preprov.append((switch_cfg, preprov_cfg)) @@ -1298,7 +1365,10 @@ def handle( pp_model = self._build_preprovision_model(switch_cfg, preprov_cfg) preprov_models.append(pp_model) log.info( - "Built pre-provision model for serial=%s, hostname=%s, ip=%s", pp_model.serial_number, pp_model.hostname, pp_model.ip + "Built pre-provision model for serial=%s, hostname=%s, ip=%s", + pp_model.serial_number, + pp_model.hostname, + pp_model.ip, ) if preprov_models: @@ -1338,7 +1408,9 @@ def _handle_poap_bootstrap( bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) bootstrap_idx = build_bootstrap_index(bootstrap_switches) log.debug( - "Bootstrap index contains %s switch(es): %s", len(bootstrap_idx), list(bootstrap_idx.keys()) + "Bootstrap index contains %s switch(es): %s", + len(bootstrap_idx), + list(bootstrap_idx.keys()), ) import_models: List[BootstrapImportSwitchModel] = [] @@ -1361,7 +1433,10 @@ def _handle_poap_bootstrap( ) import_models.append(model) log.info( - "Built bootstrap model for serial=%s, hostname=%s, ip=%s", serial, model.hostname, model.ip + "Built bootstrap model for serial=%s, hostname=%s, ip=%s", + serial, + model.hostname, + model.ip, ) if not import_models: @@ -1433,7 +1508,10 @@ def _build_bootstrap_import_model( api_hostname = bs.get("hostname", "") if api_hostname and api_hostname != user_hostname: log.warning( - "Bootstrap (%s): API hostname '%s' overrides user-provided hostname '%s'. Using API value.", serial_number, api_hostname, user_hostname + "Bootstrap (%s): API hostname '%s' overrides user-provided hostname '%s'. 
Using API value.", + serial_number, + api_hostname, + user_hostname, ) hostname = api_hostname else: @@ -1447,7 +1525,10 @@ def _build_bootstrap_import_model( api_role = SwitchRole.normalize(api_role_raw) if api_role and api_role != switch_role: log.warning( - "Bootstrap (%s): API role '%s' overrides user-provided role '%s'. Using API value.", serial_number, api_role_raw, switch_role + "Bootstrap (%s): API role '%s' overrides user-provided role '%s'. Using API value.", + serial_number, + api_role_raw, + switch_role, ) switch_role = api_role except Exception: @@ -1520,7 +1601,9 @@ def _import_bootstrap_switches( log.debug("importBootstrap endpoint: %s", endpoint.path) log.debug("importBootstrap payload (masked): %s", mask_password(payload)) log.info( - "Importing %s bootstrap switch(es): %s", len(models), [m.serial_number for m in models] + "Importing %s bootstrap switch(es): %s", + len(models), + [m.serial_number for m in models], ) try: @@ -1551,7 +1634,7 @@ def _import_bootstrap_switches( log.error(msg) nd.module.fail_json(msg=msg) - log.info("importBootstrap API response success: %s", result.get('success')) + log.info("importBootstrap API response success: %s", result.get("success")) log.debug("EXIT: _import_bootstrap_switches()") def _build_preprovision_model( @@ -1606,7 +1689,9 @@ def _build_preprovision_model( switchRole=switch_role, ) - log.debug("EXIT: _build_preprovision_model() -> %s", preprov_model.serial_number) + log.debug( + "EXIT: _build_preprovision_model() -> %s", preprov_model.serial_number + ) return preprov_model def _preprovision_switches( @@ -1636,7 +1721,9 @@ def _preprovision_switches( log.debug("preProvision endpoint: %s", endpoint.path) log.debug("preProvision payload (masked): %s", mask_password(payload)) log.info( - "Pre-provisioning %s switch(es): %s", len(models), [m.serial_number for m in models] + "Pre-provisioning %s switch(es): %s", + len(models), + [m.serial_number for m in models], ) try: @@ -1667,7 +1754,7 @@ def 
_preprovision_switches( log.error(msg) nd.module.fail_json(msg=msg) - log.info("preProvision API response success: %s", result.get('success')) + log.info("preProvision API response success: %s", result.get("success")) log.debug("EXIT: _preprovision_switches()") def _handle_poap_swap( @@ -1705,7 +1792,9 @@ def _handle_poap_swap( if sw.switch_id } log.debug( - "Fabric inventory contains %s switch(es): %s", len(fabric_index), list(fabric_index.keys()) + "Fabric inventory contains %s switch(es): %s", + len(fabric_index), + list(fabric_index.keys()), ) for switch_cfg, poap_cfg, preprov_cfg in swap_entries: @@ -1719,7 +1808,8 @@ def _handle_poap_swap( log.error(msg) nd.module.fail_json(msg=msg) log.info( - "Validated: pre-provisioned serial '%s' exists in fabric inventory", old_serial + "Validated: pre-provisioned serial '%s' exists in fabric inventory", + old_serial, ) # ------------------------------------------------------------------ @@ -1728,7 +1818,9 @@ def _handle_poap_swap( bootstrap_switches = query_bootstrap_switches(nd, fabric, log) bootstrap_index = build_bootstrap_index(bootstrap_switches) log.debug( - "Bootstrap list contains %s switch(es): %s", len(bootstrap_index), list(bootstrap_index.keys()) + "Bootstrap list contains %s switch(es): %s", + len(bootstrap_index), + list(bootstrap_index.keys()), ) for switch_cfg, poap_cfg, preprov_cfg in swap_entries: @@ -1742,9 +1834,7 @@ def _handle_poap_swap( ) log.error(msg) nd.module.fail_json(msg=msg) - log.info( - "Validated: new serial '%s' exists in bootstrap list", new_serial - ) + log.info("Validated: new serial '%s' exists in bootstrap list", new_serial) # ------------------------------------------------------------------ # Step 3: Call changeSwitchSerialNumber for each swap entry @@ -1754,7 +1844,9 @@ def _handle_poap_swap( new_serial = poap_cfg.serial_number log.info( - "Swapping serial for pre-provisioned switch: %s → %s", old_serial, new_serial + "Swapping serial for pre-provisioned switch: %s → %s", + 
old_serial, + new_serial, ) endpoint = EpManageFabricsSwitchChangeSerialNumberPost() @@ -1830,7 +1922,10 @@ def _handle_poap_swap( ) import_models.append(model) log.info( - "Built bootstrap model for swapped serial=%s, hostname=%s, ip=%s", new_serial, model.hostname, model.ip + "Built bootstrap model for swapped serial=%s, hostname=%s, ip=%s", + new_serial, + model.hostname, + model.ip, ) if not import_models: @@ -1860,7 +1955,9 @@ def _handle_poap_swap( ) log.info( - "POAP swap completed successfully for %s switch(es): %s", len(swap_entries), [sn for sn, _cfg in switch_actions] + "POAP swap completed successfully for %s switch(es): %s", + len(swap_entries), + [sn for sn, _cfg in switch_actions], ) log.debug("EXIT: _handle_poap_swap()") @@ -1932,7 +2029,8 @@ def handle( for switch_cfg in proposed_config: if not switch_cfg.rma: log.warning( - "Switch config for %s has no RMA block — skipping", switch_cfg.seed_ip + "Switch config for %s has no RMA block — skipping", + switch_cfg.seed_ip, ) continue for rma_cfg in switch_cfg.rma: @@ -1957,7 +2055,9 @@ def handle( bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) bootstrap_idx = build_bootstrap_index(bootstrap_switches) log.debug( - "Bootstrap index contains %s switch(es): %s", len(bootstrap_idx), list(bootstrap_idx.keys()) + "Bootstrap index contains %s switch(es): %s", + len(bootstrap_idx), + list(bootstrap_idx.keys()), ) # Build and submit each RMA request @@ -1997,7 +2097,9 @@ def handle( old_switch_info[rma_cfg.old_serial_number], ) log.info( - "Built RMA model: replacing %s with %s", rma_cfg.old_serial_number, rma_model.new_switch_id + "Built RMA model: replacing %s with %s", + rma_cfg.old_serial_number, + rma_model.new_switch_id, ) self._provision_rma_switch(rma_cfg.old_serial_number, rma_model) @@ -2014,7 +2116,9 @@ def handle( # migration-mode phase. 
all_new_serials = [sn for sn, _cfg in switch_actions] log.info( - "Waiting for %s RMA replacement switch(es) to become ready: %s", len(all_new_serials), all_new_serials + "Waiting for %s RMA replacement switch(es) to become ready: %s", + len(all_new_serials), + all_new_serials, ) success = self.wait_utils.wait_for_rma_switch_ready(all_new_serials) if not success: @@ -2157,7 +2261,9 @@ def _build_rma_model( """ log = self.ctx.log log.debug( - "ENTER: _build_rma_model(new=%s, old=%s)", rma_cfg.new_serial_number, rma_cfg.old_serial_number + "ENTER: _build_rma_model(new=%s, old=%s)", + rma_cfg.new_serial_number, + rma_cfg.old_serial_number, ) # User config fields @@ -2277,7 +2383,7 @@ def _provision_rma_switch( log.error(msg) nd.module.fail_json(msg=msg) - log.info("RMA provision API response success: %s", result.get('success')) + log.info("RMA provision API response success: %s", result.get("success")) log.debug("EXIT: _provision_rma_switch()") @@ -2615,7 +2721,8 @@ def _handle_merged_state( add_configs.append(cfg) else: self.log.warning( - "No config found for switch %s, skipping add", sw.fabric_management_ip + "No config found for switch %s, skipping add", + sw.fabric_management_ip, ) if add_configs: @@ -2805,7 +2912,11 @@ def _handle_overridden_state( n_add = len(diff.get("to_add", [])) n_migrate = len(diff.get("migration_mode", [])) self.log.info( - "Check mode: would delete %s, delete-and-re-add %s, add %s, migrate %s", n_delete, n_update, n_add, n_migrate + "Check mode: would delete %s, delete-and-re-add %s, add %s, migrate %s", + n_delete, + n_update, + n_add, + n_migrate, ) self.results.action = "override" self.results.state = self.state @@ -2829,7 +2940,9 @@ def _handle_overridden_state( # Phase 1: Switches not in proposed config for sw in diff.get("to_delete", []): self.log.info( - "Marking for deletion (not in proposed): %s (%s)", sw.fabric_management_ip, sw.switch_id + "Marking for deletion (not in proposed): %s (%s)", + sw.fabric_management_ip, + 
sw.switch_id, ) switches_to_delete.append(sw) self._log_operation("delete", sw.fabric_management_ip) @@ -2847,7 +2960,9 @@ def _handle_overridden_state( ) if existing_sw: self.log.info( - "Marking for deletion (re-add update): %s (%s)", existing_sw.fabric_management_ip, existing_sw.switch_id + "Marking for deletion (re-add update): %s (%s)", + existing_sw.fabric_management_ip, + existing_sw.switch_id, ) switches_to_delete.append(existing_sw) self._log_operation( @@ -2913,7 +3028,9 @@ def _handle_gathered_state(self) -> None: self.results.register_api_call() self.log.info( - "Gathered %s switch(es) from fabric '%s'", len(list(self.existing)), self.fabric + "Gathered %s switch(es) from fabric '%s'", + len(list(self.existing)), + self.fabric, ) self.log.debug("EXIT: _handle_gathered_state()") @@ -2935,7 +3052,8 @@ def _handle_deleted_state( if proposed_config is None: switches_to_delete = list(self.existing) self.log.info( - "No proposed config — targeting all %s existing switch(es) for deletion", len(switches_to_delete) + "No proposed config — targeting all %s existing switch(es) for deletion", + len(switches_to_delete), ) for sw in switches_to_delete: self._log_operation("delete", sw.fabric_management_ip) @@ -2956,7 +3074,9 @@ def _handle_deleted_state( ) if existing_switch: self.log.info( - "Marking for deletion: %s (%s)", identifier, existing_switch.switch_id + "Marking for deletion: %s (%s)", + identifier, + existing_switch.switch_id, ) switches_to_delete.append(existing_switch) else: diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py index e021fda0..1ede0946 100644 --- a/plugins/module_utils/manage_switches/utils.py +++ b/plugins/module_utils/manage_switches/utils.py @@ -199,7 +199,9 @@ def group_switches_by_credentials( ) groups.setdefault(group_key, []).append(switch) - log.info("Grouped %s switches into %s credential group(s)", len(switches), len(groups)) + log.info( + "Grouped %s switches into %s 
credential group(s)", len(switches), len(groups) + ) for idx, (key, group_switches) in enumerate(groups.items(), 1): username, _pw_hash, auth_proto, platform_type, preserve_config = key @@ -479,7 +481,8 @@ def wait_for_rma_switch_ready( ``True`` if all switches reach ``ok`` status, ``False`` on timeout. """ self.log.info( - "Waiting for RMA replacement switch(es) to become ready (skipping migration-mode phase): %s", serial_numbers + "Waiting for RMA replacement switch(es) to become ready (skipping migration-mode phase): %s", + serial_numbers, ) # Phase 1: wait until all new serials appear in the fabric inventory. @@ -522,7 +525,9 @@ def wait_for_discovery( self.log.error("Discovery failed for %s: %s", seed_ip, status) return None - self.log.debug("Discovery attempt %s/%s for %s", attempt + 1, attempts, seed_ip) + self.log.debug( + "Discovery attempt %s/%s for %s", attempt + 1, attempts, seed_ip + ) time.sleep(interval) self.log.warning("Discovery timeout for %s", seed_ip) @@ -604,7 +609,12 @@ def _poll_system_mode( pending = remaining self.log.debug( - "Attempt %s/%s: %s switch(es) waiting to %s: %s", attempt, self.max_attempts, len(pending), label, pending + "Attempt %s/%s: %s switch(es) waiting to %s: %s", + attempt, + self.max_attempts, + len(pending), + label, + pending, ) time.sleep(self.wait_interval * self._MIGRATION_SLEEP_FACTOR) @@ -719,13 +729,20 @@ def _wait_for_discovery_state( if not pending: self.log.info( - "All switches reached '%s' state (attempt %s)", target_state, attempt + "All switches reached '%s' state (attempt %s)", + target_state, + attempt, ) return True self._trigger_rediscovery(pending) self.log.debug( - "Attempt %s/%s: %s switch(es) not yet '%s': %s", attempt, self.max_attempts, len(pending), target_state, pending + "Attempt %s/%s: %s switch(es) not yet '%s': %s", + attempt, + self.max_attempts, + len(pending), + target_state, + pending, ) time.sleep(self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR) @@ -757,7 +774,9 @@ def 
_wait_for_switches_in_fabric( """ pending = list(serial_numbers) self.log.info( - "Waiting for %s switch(es) to appear in fabric inventory: %s", len(pending), pending + "Waiting for %s switch(es) to appear in fabric inventory: %s", + len(pending), + pending, ) for attempt in range(1, self.max_attempts + 1): @@ -775,16 +794,23 @@ def _wait_for_switches_in_fabric( if not pending: self.log.info( - "All RMA switch(es) now visible in fabric inventory (attempt %s)", attempt + "All RMA switch(es) now visible in fabric inventory (attempt %s)", + attempt, ) return True self.log.debug( - "Attempt %s/%s: %s switch(es) not yet in fabric: %s", attempt, self.max_attempts, len(pending), pending + "Attempt %s/%s: %s switch(es) not yet in fabric: %s", + attempt, + self.max_attempts, + len(pending), + pending, ) time.sleep(self.wait_interval) - self.log.warning("Timeout waiting for switches to appear in fabric: %s", pending) + self.log.warning( + "Timeout waiting for switches to appear in fabric: %s", pending + ) return False def _fetch_switch_data( diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 34ae5f38..99e7ce5c 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -483,7 +483,9 @@ def main(): sw_module.manage_state() # Exit with results - log.info("State management completed successfully. Changed: %s", results.changed) + log.info( + "State management completed successfully. 
Changed: %s", results.changed + ) sw_module.exit_json() except NDModuleError as error: From e000775ba3e2f6b9ab40c16d8dc118946d458968 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Tue, 31 Mar 2026 12:03:18 +0530 Subject: [PATCH 082/109] Black + Sanity Fix --- plugins/action/nd_switches_validate.py | 51 +-- plugins/module_utils/endpoints/mixins.py | 52 +-- .../endpoints/v1/manage/manage_fabrics.py | 8 +- .../v1/manage/manage_fabrics_bootstrap.py | 4 +- .../v1/manage/manage_fabrics_switchactions.py | 8 +- .../v1/manage/manage_fabrics_switches.py | 24 +- .../manage_switches/nd_switch_resources.py | 407 ++++-------------- plugins/module_utils/manage_switches/utils.py | 70 +-- .../manage_switches/bootstrap_models.py | 80 +--- .../models/manage_switches/config_models.py | 118 ++--- .../manage_switches/discovery_models.py | 60 +-- .../models/manage_switches/enums.py | 16 +- .../manage_switches/preprovision_models.py | 12 +- .../models/manage_switches/rma_models.py | 36 +- .../manage_switches/switch_actions_models.py | 25 +- .../manage_switches/switch_data_models.py | 146 ++----- .../models/manage_switches/validators.py | 9 +- plugins/modules/nd_manage_switches.py | 4 +- tests/sanity/ignore-2.17.txt | 1 + tests/sanity/ignore-2.18.txt | 1 + tests/sanity/ignore-2.19.txt | 1 + .../test_endpoints_api_v1_manage_fabrics.py | 9 +- ...nts_api_v1_manage_fabrics_switchactions.py | 18 +- ...ndpoints_api_v1_manage_fabrics_switches.py | 24 +- 24 files changed, 274 insertions(+), 910 deletions(-) create mode 100644 tests/sanity/ignore-2.17.txt create mode 100644 tests/sanity/ignore-2.18.txt create mode 100644 tests/sanity/ignore-2.19.txt diff --git a/plugins/action/nd_switches_validate.py b/plugins/action/nd_switches_validate.py index 60a54bb9..ba96f29b 100644 --- a/plugins/action/nd_switches_validate.py +++ b/plugins/action/nd_switches_validate.py @@ -73,14 +73,7 @@ def parse_config_data(cls, value): return [SwitchConfigModel.model_validate(value)] if isinstance(value, list): try: - return 
[ - ( - SwitchConfigModel.model_validate(item) - if isinstance(item, dict) - else item - ) - for item in value - ] + return [(SwitchConfigModel.model_validate(item) if isinstance(item, dict) else item) for item in value] except (ValidationError, ValueError) as e: raise ValueError("Invalid format in Config Data: {0}".format(e)) if value is None: @@ -93,14 +86,7 @@ def parse_nd_data(cls, value): """Coerce raw ND API switch dicts into SwitchDataModel instances.""" if isinstance(value, list): try: - return [ - ( - SwitchDataModel.from_response(item) - if isinstance(item, dict) - else item - ) - for item in value - ] + return [(SwitchDataModel.from_response(item) if isinstance(item, dict) else item) for item in value] except (ValidationError, ValueError) as e: raise ValueError("Invalid format in ND Response: {0}".format(e)) if value is None: @@ -146,34 +132,19 @@ def validate_lists_equality(self): ip_address = nd_item.fabric_management_ip switch_role = nd_item.switch_role # SwitchRole enum or None - seed_ip_match = ( - seed_ip is not None - and ip_address is not None - and ip_address == seed_ip - ) or bool(ignore_fields["seed_ip"]) - role_match = ( - role_expected is not None - and switch_role is not None - and switch_role == role_expected - ) or bool(ignore_fields["role"]) + seed_ip_match = (seed_ip is not None and ip_address is not None and ip_address == seed_ip) or bool(ignore_fields["seed_ip"]) + role_match = (role_expected is not None and switch_role is not None and switch_role == role_expected) or bool(ignore_fields["role"]) if seed_ip_match and role_match: matched_indices.add(i) found_match = True if ignore_fields["seed_ip"]: break - elif ( - seed_ip_match - and role_expected is not None - and switch_role is not None - and switch_role != role_expected - ) or ignore_fields["role"]: + elif (seed_ip_match and role_expected is not None and switch_role is not None and switch_role != role_expected) or ignore_fields["role"]: role_mismatches.setdefault( seed_ip or 
ip_address, { - "expected_role": ( - role_expected.value if role_expected else None - ), + "expected_role": (role_expected.value if role_expected else None), "response_role": switch_role.value if switch_role else None, }, ) @@ -192,11 +163,7 @@ def validate_lists_equality(self): if missing_ips: display.display(" Missing IPs: {0}".format(missing_ips)) if role_mismatches: - display.display( - " Role mismatches: {0}".format( - json.dumps(role_mismatches, indent=2) - ) - ) + display.display(" Role mismatches: {0}".format(json.dumps(role_mismatches, indent=2))) self.response = False return self @@ -226,9 +193,7 @@ def run(self, tmp=None, task_vars=None): if not HAS_PYDANTIC or not HAS_MODELS: results["failed"] = True - results["msg"] = ( - "pydantic and the ND collection models are required for nd_switches_validate" - ) + results["msg"] = "pydantic and the ND collection models are required for nd_switches_validate" return results nd_data = self._task.args["nd_data"] diff --git a/plugins/module_utils/endpoints/mixins.py b/plugins/module_utils/endpoints/mixins.py index df33d6d3..9cd60fff 100644 --- a/plugins/module_utils/endpoints/mixins.py +++ b/plugins/module_utils/endpoints/mixins.py @@ -23,57 +23,43 @@ class ClusterNameMixin(BaseModel): """Mixin for endpoints that require cluster_name parameter.""" - cluster_name: Optional[str] = Field( - default=None, min_length=1, description="Cluster name" - ) + cluster_name: Optional[str] = Field(default=None, min_length=1, description="Cluster name") class FabricNameMixin(BaseModel): """Mixin for endpoints that require fabric_name parameter.""" - fabric_name: Optional[str] = Field( - default=None, min_length=1, max_length=64, description="Fabric name" - ) + fabric_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="Fabric name") class FilterMixin(BaseModel): """Mixin for endpoints that require a Lucene filter expression.""" - filter: Optional[str] = Field( - default=None, min_length=1, 
description="Lucene filter expression" - ) + filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") class ForceShowRunMixin(BaseModel): """Mixin for endpoints that require force_show_run parameter.""" - force_show_run: BooleanStringEnum = Field( - default=BooleanStringEnum.FALSE, description="Force show running config" - ) + force_show_run: BooleanStringEnum = Field(default=BooleanStringEnum.FALSE, description="Force show running config") class HealthCategoryMixin(BaseModel): """Mixin for endpoints that require health_category parameter.""" - health_category: Optional[str] = Field( - default=None, min_length=1, description="Health category" - ) + health_category: Optional[str] = Field(default=None, min_length=1, description="Health category") class InclAllMsdSwitchesMixin(BaseModel): """Mixin for endpoints that require incl_all_msd_switches parameter.""" - incl_all_msd_switches: BooleanStringEnum = Field( - default=BooleanStringEnum.FALSE, description="Include all MSD switches" - ) + incl_all_msd_switches: BooleanStringEnum = Field(default=BooleanStringEnum.FALSE, description="Include all MSD switches") class LinkUuidMixin(BaseModel): """Mixin for endpoints that require link_uuid parameter.""" - link_uuid: Optional[str] = Field( - default=None, min_length=1, description="Link UUID" - ) + link_uuid: Optional[str] = Field(default=None, min_length=1, description="Link UUID") class LoginIdMixin(BaseModel): @@ -85,25 +71,19 @@ class LoginIdMixin(BaseModel): class MaxMixin(BaseModel): """Mixin for endpoints that require a max results parameter.""" - max: Optional[int] = Field( - default=None, ge=1, description="Maximum number of results" - ) + max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") class NetworkNameMixin(BaseModel): """Mixin for endpoints that require network_name parameter.""" - network_name: Optional[str] = Field( - default=None, min_length=1, max_length=64, description="Network 
name" - ) + network_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="Network name") class NodeNameMixin(BaseModel): """Mixin for endpoints that require node_name parameter.""" - node_name: Optional[str] = Field( - default=None, min_length=1, description="Node name" - ) + node_name: Optional[str] = Field(default=None, min_length=1, description="Node name") class OffsetMixin(BaseModel): @@ -115,22 +95,16 @@ class OffsetMixin(BaseModel): class SwitchSerialNumberMixin(BaseModel): """Mixin for endpoints that require switch_sn parameter.""" - switch_sn: Optional[str] = Field( - default=None, min_length=1, description="Switch serial number" - ) + switch_sn: Optional[str] = Field(default=None, min_length=1, description="Switch serial number") class TicketIdMixin(BaseModel): """Mixin for endpoints that require ticket_id parameter.""" - ticket_id: Optional[str] = Field( - default=None, min_length=1, description="Change control ticket ID" - ) + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") class VrfNameMixin(BaseModel): """Mixin for endpoints that require vrf_name parameter.""" - vrf_name: Optional[str] = Field( - default=None, min_length=1, max_length=64, description="VRF name" - ) + vrf_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="VRF name") diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py index 9093a672..c54716ac 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py @@ -61,12 +61,8 @@ class FabricConfigDeployEndpointParams(EndpointQueryParams): ``` """ - force_show_run: Optional[bool] = Field( - default=None, description="Force show running config before deploy" - ) - incl_all_msd_switches: Optional[bool] = Field( - default=None, description="Include all MSD fabric switches" - ) + 
force_show_run: Optional[bool] = Field(default=None, description="Force show running config before deploy") + incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") class _EpManageFabricsBase(FabricNameMixin, NDEndpointBaseModel): diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py index 28f5c761..d61f1e43 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py @@ -43,9 +43,7 @@ ) -class FabricsBootstrapEndpointParams( - FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams -): +class FabricsBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): """ # Summary diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py index 73c7b148..d217a9ca 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py @@ -93,9 +93,7 @@ class SwitchActionsTicketEndpointParams(TicketIdMixin, EndpointQueryParams): """ -class SwitchActionsImportEndpointParams( - ClusterNameMixin, TicketIdMixin, EndpointQueryParams -): +class SwitchActionsImportEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): """ # Summary @@ -289,9 +287,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class EpManageFabricsSwitchActionsImportBootstrapPost( - _EpManageFabricsSwitchActionsBase -): +class EpManageFabricsSwitchActionsImportBootstrapPost(_EpManageFabricsSwitchActionsBase): """ # Summary diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py index d485cd09..fe28dfa4 100644 --- 
a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py @@ -47,9 +47,7 @@ ) -class FabricSwitchesGetEndpointParams( - FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams -): +class FabricSwitchesGetEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): """ # Summary @@ -71,14 +69,10 @@ class FabricSwitchesGetEndpointParams( ``` """ - hostname: Optional[str] = Field( - default=None, min_length=1, description="Filter by switch hostname" - ) + hostname: Optional[str] = Field(default=None, min_length=1, description="Filter by switch hostname") -class FabricSwitchesAddEndpointParams( - ClusterNameMixin, TicketIdMixin, EndpointQueryParams -): +class FabricSwitchesAddEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): """ # Summary @@ -314,9 +308,7 @@ class SwitchActionsClusterEndpointParams(ClusterNameMixin, EndpointQueryParams): """ -class _EpManageFabricsSwitchActionsPerSwitchBase( - FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel -): +class _EpManageFabricsSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): """ Base class for per-switch action endpoints. 
@@ -331,9 +323,7 @@ def _base_path(self) -> str: raise ValueError("fabric_name must be set before accessing path") if self.switch_sn is None: raise ValueError("switch_sn must be set before accessing path") - return BasePath.path( - "fabrics", self.fabric_name, "switches", self.switch_sn, "actions" - ) + return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn, "actions") class EpManageFabricsSwitchProvisionRMAPost(_EpManageFabricsSwitchActionsPerSwitchBase): @@ -405,9 +395,7 @@ def verb(self) -> HttpVerbEnum: return HttpVerbEnum.POST -class EpManageFabricsSwitchChangeSerialNumberPost( - _EpManageFabricsSwitchActionsPerSwitchBase -): +class EpManageFabricsSwitchChangeSerialNumberPost(_EpManageFabricsSwitchActionsPerSwitchBase): """ # Summary diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index ec6caf84..6a30b47f 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -164,26 +164,18 @@ def validate_configs( validated_configs: List[SwitchConfigModel] = [] for idx, cfg in enumerate(configs_list): try: - validated = SwitchConfigModel.model_validate( - cfg, context={"state": state} - ) + validated = SwitchConfigModel.model_validate(cfg, context={"state": state}) validated_configs.append(validated) except ValidationError as e: error_detail = e.errors() if hasattr(e, "errors") else str(e) - error_msg = ( - f"Configuration validation failed for " - f"config index {idx}: {error_detail}" - ) + error_msg = f"Configuration validation failed for " f"config index {idx}: {error_detail}" log.error(error_msg) if hasattr(nd, "module"): nd.module.fail_json(msg=error_msg) else: raise ValueError(error_msg) from e except Exception as e: - error_msg = ( - f"Configuration validation failed for " - f"config index {idx}: {str(e)}" - ) + error_msg = f"Configuration validation failed for " f"config 
index {idx}: {str(e)}" log.error(error_msg) if hasattr(nd, "module"): nd.module.fail_json(msg=error_msg) @@ -202,10 +194,7 @@ def validate_configs( duplicate_ips.add(cfg.seed_ip) seen_ips.add(cfg.seed_ip) if duplicate_ips: - error_msg = ( - f"Duplicate seed_ip entries found in config: " - f"{sorted(duplicate_ips)}. Each switch must appear only once." - ) + error_msg = f"Duplicate seed_ip entries found in config: " f"{sorted(duplicate_ips)}. Each switch must appear only once." log.error(error_msg) if hasattr(nd, "module"): nd.module.fail_json(msg=error_msg) @@ -295,9 +284,7 @@ def compute_changes( match_key = "ip" if not existing_sw: - log.info( - "Switch %s (id=%s) not found in existing — marking to_add", ip, sid - ) + log.info("Switch %s (id=%s) not found in existing — marking to_add", ip, sid) changes["to_add"].append(prop_sw) continue @@ -316,28 +303,18 @@ def compute_changes( ) if existing_sw.additional_data.system_mode == SystemMode.MIGRATION: - log.info( - "Switch %s (%s) is in Migration mode", ip, existing_sw.switch_id - ) + log.info("Switch %s (%s) is in Migration mode", ip, existing_sw.switch_id) changes["migration_mode"].append(prop_sw) continue - prop_dict = prop_sw.model_dump( - by_alias=False, exclude_none=True, include=compare_fields - ) - existing_dict = existing_sw.model_dump( - by_alias=False, exclude_none=True, include=compare_fields - ) + prop_dict = prop_sw.model_dump(by_alias=False, exclude_none=True, include=compare_fields) + existing_dict = existing_sw.model_dump(by_alias=False, exclude_none=True, include=compare_fields) if prop_dict == existing_dict: log.debug("Switch %s is idempotent — no changes needed", ip) changes["idempotent"].append(prop_sw) else: - diff_keys = { - k - for k in set(prop_dict) | set(existing_dict) - if prop_dict.get(k) != existing_dict.get(k) - } + diff_keys = {k for k in set(prop_dict) | set(existing_dict) if prop_dict.get(k) != existing_dict.get(k)} log.info( "Switch %s has differences — marking to_update. 
Changed fields: %s", ip, @@ -412,33 +389,19 @@ def validate_switch_api_fields( mismatches: List[str] = [] if model is not None and model != bootstrap_data.get("model"): - mismatches.append( - f"model: provided '{model}', " - f"bootstrap reports '{bootstrap_data.get('model')}'" - ) + mismatches.append(f"model: provided '{model}', " f"bootstrap reports '{bootstrap_data.get('model')}'") if version is not None and version != bootstrap_data.get("softwareVersion"): - mismatches.append( - f"version: provided '{version}', " - f"bootstrap reports '{bootstrap_data.get('softwareVersion')}'" - ) + mismatches.append(f"version: provided '{version}', " f"bootstrap reports '{bootstrap_data.get('softwareVersion')}'") if config_data is not None: - bs_gateway = bootstrap_data.get("gatewayIpMask") or bs_data.get( - "gatewayIpMask" - ) + bs_gateway = bootstrap_data.get("gatewayIpMask") or bs_data.get("gatewayIpMask") if config_data.gateway is not None and config_data.gateway != bs_gateway: - mismatches.append( - f"config_data.gateway: provided '{config_data.gateway}', " - f"bootstrap reports '{bs_gateway}'" - ) + mismatches.append(f"config_data.gateway: provided '{config_data.gateway}', " f"bootstrap reports '{bs_gateway}'") bs_models = bs_data.get("models", []) if config_data.models and sorted(config_data.models) != sorted(bs_models): - mismatches.append( - f"config_data.models: provided {config_data.models}, " - f"bootstrap reports {bs_models}" - ) + mismatches.append(f"config_data.models: provided {config_data.models}, " f"bootstrap reports {bs_models}") if mismatches: nd.module.fail_json( @@ -528,10 +491,7 @@ def discover( all_discovered.update(discovered_batch) except Exception as e: seed_ips = [sw.seed_ip for sw in switches] - msg = ( - f"Discovery failed for credential group " - f"(username={username}, IPs={seed_ips}): {e}" - ) + msg = f"Discovery failed for credential group " f"(username={username}, IPs={seed_ips}): {e}" log.error(msg) self.ctx.nd.module.fail_json(msg=msg) @@ 
-610,9 +570,7 @@ def bulk_discover( elif "switches" in response: switches_data = response.get("switches", []) - log.debug( - "Extracted %s switches from discovery response", len(switches_data) - ) + log.debug("Extracted %s switches from discovery response", len(switches_data)) discovered_results: Dict[str, Dict[str, Any]] = {} for discovered in switches_data: @@ -624,17 +582,11 @@ def bulk_discover( serial_number = discovered.get("serialNumber") if not serial_number: - msg = ( - f"Switch {ip} discovery response missing serial number. " - f"Cannot proceed without a valid serial number." - ) + msg = f"Switch {ip} discovery response missing serial number. " f"Cannot proceed without a valid serial number." log.error(msg) nd.module.fail_json(msg=msg) if not ip: - msg = ( - f"Switch with serial {serial_number} discovery response " - f"missing IP address. Cannot proceed without a valid IP." - ) + msg = f"Switch with serial {serial_number} discovery response " f"missing IP address. Cannot proceed without a valid IP." log.error(msg) nd.module.fail_json(msg=msg) @@ -714,9 +666,7 @@ def build_proposed( if existing_match: if cfg.role is not None: data = existing_match.model_dump(by_alias=True) - data["switchRole"] = ( - cfg.role.value if isinstance(cfg.role, SwitchRole) else cfg.role - ) + data["switchRole"] = cfg.role.value if isinstance(cfg.role, SwitchRole) else cfg.role proposed.append(SwitchDataModel.model_validate(data)) else: proposed.append(existing_match) @@ -726,10 +676,7 @@ def build_proposed( ) continue - msg = ( - f"Switch with seed IP {seed_ip} not discovered " - f"and not found in existing inventory." - ) + msg = f"Switch with seed IP {seed_ip} not discovered " f"and not found in existing inventory." 
log.error(msg) self.ctx.nd.module.fail_json(msg=msg) @@ -795,10 +742,7 @@ def bulk_add( missing_fields = [f for f in required_fields if not discovered.get(f)] if missing_fields: - msg = ( - f"Switch missing required fields from discovery: " - f"{', '.join(missing_fields)}. Cannot add to fabric." - ) + msg = f"Switch missing required fields from discovery: " f"{', '.join(missing_fields)}. Cannot add to fabric." log.error(msg) nd.module.fail_json(msg=msg) @@ -821,9 +765,7 @@ def bulk_add( if not switch_discoveries: log.error("No valid switches to add after validation") - raise SwitchOperationError( - "No valid switches to add - all failed validation" - ) + raise SwitchOperationError("No valid switches to add - all failed validation") add_request = AddSwitchesRequestModel( switches=switch_discoveries, @@ -848,10 +790,7 @@ def bulk_add( try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) except Exception as e: - msg = ( - f"Bulk add switches to fabric '{self.ctx.fabric}' failed " - f"for {', '.join(serial_numbers)}: {e}" - ) + msg = f"Bulk add switches to fabric '{self.ctx.fabric}' failed " f"for {', '.join(serial_numbers)}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -866,10 +805,7 @@ def bulk_add( results.register_api_call() if not result.get("success"): - msg = ( - f"Bulk add switches failed for " - f"{', '.join(serial_numbers)}: {response}" - ) + msg = f"Bulk add switches failed for " f"{', '.join(serial_numbers)}: {response}" log.error(msg) nd.module.fail_json(msg=msg) @@ -911,9 +847,7 @@ def bulk_delete( if sn: serial_numbers.append(sn) else: - ip = getattr(switch, "fabric_management_ip", None) or getattr( - switch, "ip", None - ) + ip = getattr(switch, "fabric_management_ip", None) or getattr(switch, "ip", None) log.warning("Cannot delete switch %s: no serial number/switch_id", ip) if not serial_numbers: @@ -953,9 +887,7 @@ def bulk_delete( except Exception as e: log.error("Bulk delete failed: %s", e) - raise SwitchOperationError( - 
f"Bulk delete failed for {serial_numbers}: {e}" - ) from e + raise SwitchOperationError(f"Bulk delete failed for {serial_numbers}: {e}") from e def bulk_save_credentials( self, @@ -978,9 +910,7 @@ def bulk_save_credentials( cred_groups: Dict[Tuple[str, str], List[str]] = {} for sn, cfg in switch_actions: if not cfg.username or not cfg.password: - log.debug( - "Skipping credentials for %s: missing username or password", sn - ) + log.debug("Skipping credentials for %s: missing username or password", sn) continue key = (cfg.username, cfg.password) cred_groups.setdefault(key, []).append(sn) @@ -1024,9 +954,7 @@ def bulk_save_credentials( results.register_api_call() log.info("Credentials saved for %s switch(es)", len(serial_numbers)) except Exception as e: - msg = ( - f"Failed to save credentials for " f"switches {serial_numbers}: {e}" - ) + msg = f"Failed to save credentials for " f"switches {serial_numbers}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1154,11 +1082,7 @@ def post_add_processing( **wait_kwargs, ) if not success: - msg = ( - f"One or more {context} switches failed to become " - f"manageable in fabric '{self.ctx.fabric}'. " - f"Switches: {all_serials}" - ) + msg = f"One or more {context} switches failed to become " f"manageable in fabric '{self.ctx.fabric}'. 
" f"Switches: {all_serials}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1170,10 +1094,7 @@ def post_add_processing( try: self.finalize() except Exception as e: - msg = ( - f"Failed to finalize (config-save/deploy) for " - f"{context} switches {all_serials}: {e}" - ) + msg = f"Failed to finalize (config-save/deploy) for " f"{context} switches {all_serials}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1230,9 +1151,7 @@ def handle( # Classify entries first so check mode can report per-operation counts bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]] = [] preprov_entries: List[Tuple[SwitchConfigModel, PreprovisionConfigModel]] = [] - swap_entries: List[ - Tuple[SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel] - ] = [] + swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel]] = [] for switch_cfg in proposed_config: has_poap = bool(switch_cfg.poap) @@ -1275,9 +1194,7 @@ def handle( switch_cfg.seed_ip, preprov_extra, ) - swap_entries.append( - (switch_cfg, switch_cfg.poap, switch_cfg.preprovision) - ) + swap_entries.append((switch_cfg, switch_cfg.poap, switch_cfg.preprovision)) elif has_preprov: preprov_entries.append((switch_cfg, switch_cfg.preprovision)) elif has_poap: @@ -1319,9 +1236,7 @@ def handle( # Build lookup structures for idempotency checks. # Bootstrap: idempotent when both IP address AND serial number match. # PreProvision: idempotent when IP address alone matches. 
- existing_by_ip = { - sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip - } + existing_by_ip = {sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip} active_bootstrap = [] for switch_cfg, poap_cfg in bootstrap_entries: @@ -1428,9 +1343,7 @@ def _handle_poap_bootstrap( log.error(msg) nd.module.fail_json(msg=msg) - model = self._build_bootstrap_import_model( - switch_cfg, poap_cfg, bootstrap_data - ) + model = self._build_bootstrap_import_model(switch_cfg, poap_cfg, bootstrap_data) import_models.append(model) log.info( "Built bootstrap model for serial=%s, hostname=%s, ip=%s", @@ -1477,9 +1390,7 @@ def _build_bootstrap_import_model( Completed ``BootstrapImportSwitchModel`` for API submission. """ log = self.ctx.log - log.debug( - "ENTER: _build_bootstrap_import_model(serial=%s)", poap_cfg.serial_number - ) + log.debug("ENTER: _build_bootstrap_import_model(serial=%s)", poap_cfg.serial_number) bs = bootstrap_data or {} bs_data = bs.get("data") or {} @@ -1569,9 +1480,7 @@ def _build_bootstrap_import_model( gatewayIpMask=gateway_ip_mask, ) - log.debug( - "EXIT: _build_bootstrap_import_model() -> %s", bootstrap_model.serial_number - ) + log.debug("EXIT: _build_bootstrap_import_model() -> %s", bootstrap_model.serial_number) return bootstrap_model def _import_bootstrap_switches( @@ -1609,10 +1518,7 @@ def _import_bootstrap_switches( try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) except Exception as e: - msg = ( - f"importBootstrap API call failed for " - f"{[m.serial_number for m in models]}: {e}" - ) + msg = f"importBootstrap API call failed for " f"{[m.serial_number for m in models]}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1627,10 +1533,7 @@ def _import_bootstrap_switches( results.register_api_call() if not result.get("success"): - msg = ( - f"importBootstrap failed for " - f"{[m.serial_number for m in models]}: {response}" - ) + msg = f"importBootstrap failed for " f"{[m.serial_number for 
m in models]}: {response}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1652,9 +1555,7 @@ def _build_preprovision_model( Completed ``PreProvisionSwitchModel`` for API submission. """ log = self.ctx.log - log.debug( - "ENTER: _build_preprovision_model(serial=%s)", preprov_cfg.serial_number - ) + log.debug("ENTER: _build_preprovision_model(serial=%s)", preprov_cfg.serial_number) serial_number = preprov_cfg.serial_number hostname = preprov_cfg.hostname @@ -1689,9 +1590,7 @@ def _build_preprovision_model( switchRole=switch_role, ) - log.debug( - "EXIT: _build_preprovision_model() -> %s", preprov_model.serial_number - ) + log.debug("EXIT: _build_preprovision_model() -> %s", preprov_model.serial_number) return preprov_model def _preprovision_switches( @@ -1729,10 +1628,7 @@ def _preprovision_switches( try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) except Exception as e: - msg = ( - f"preProvision API call failed for " - f"{[m.serial_number for m in models]}: {e}" - ) + msg = f"preProvision API call failed for " f"{[m.serial_number for m in models]}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1747,10 +1643,7 @@ def _preprovision_switches( results.register_api_call() if not result.get("success"): - msg = ( - f"preProvision failed for " - f"{[m.serial_number for m in models]}: {response}" - ) + msg = f"preProvision failed for " f"{[m.serial_number for m in models]}: {response}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1759,9 +1652,7 @@ def _preprovision_switches( def _handle_poap_swap( self, - swap_entries: List[ - Tuple[SwitchConfigModel, POAPConfigModel, "PreprovisionConfigModel"] - ], + swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel, "PreprovisionConfigModel"]], existing: List[SwitchDataModel], ) -> None: """Process POAP serial-swap entries. 
@@ -1786,11 +1677,7 @@ def _handle_poap_swap( # ------------------------------------------------------------------ # Step 1: Validate preprovision serials exist in fabric inventory # ------------------------------------------------------------------ - fabric_index: Dict[str, Dict[str, Any]] = { - sw.switch_id: sw.model_dump(by_alias=True) - for sw in existing - if sw.switch_id - } + fabric_index: Dict[str, Dict[str, Any]] = {sw.switch_id: sw.model_dump(by_alias=True) for sw in existing if sw.switch_id} log.debug( "Fabric inventory contains %s switch(es): %s", len(fabric_index), @@ -1862,10 +1749,7 @@ def _handle_poap_swap( try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) except Exception as e: - msg = ( - f"changeSwitchSerialNumber API call failed for " - f"{old_serial} → {new_serial}: {e}" - ) + msg = f"changeSwitchSerialNumber API call failed for " f"{old_serial} → {new_serial}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1883,10 +1767,7 @@ def _handle_poap_swap( results.register_api_call() if not result.get("success"): - msg = ( - f"Failed to swap serial number from {old_serial} " - f"to {new_serial}: {response}" - ) + msg = f"Failed to swap serial number from {old_serial} " f"to {new_serial}: {response}" log.error(msg) nd.module.fail_json(msg=msg) @@ -1896,9 +1777,7 @@ def _handle_poap_swap( # ------------------------------------------------------------------ post_swap_bootstrap = query_bootstrap_switches(nd, fabric, log) post_swap_index = build_bootstrap_index(post_swap_bootstrap) - log.debug( - "Post-swap bootstrap list contains %s switch(es)", len(post_swap_index) - ) + log.debug("Post-swap bootstrap list contains %s switch(es)", len(post_swap_index)) # ------------------------------------------------------------------ # Step 5: Build BootstrapImportSwitchModels and POST importBootstrap @@ -1917,9 +1796,7 @@ def _handle_poap_swap( log.error(msg) nd.module.fail_json(msg=msg) - model = self._build_bootstrap_import_model( - 
switch_cfg, poap_cfg, bootstrap_data - ) + model = self._build_bootstrap_import_model(switch_cfg, poap_cfg, bootstrap_data) import_models.append(model) log.info( "Built bootstrap model for swapped serial=%s, hostname=%s, ip=%s", @@ -2018,9 +1895,7 @@ def handle( results.operation_type = OperationType.CREATE results.response_current = {"MESSAGE": "check mode — skipped"} results.result_current = {"success": True, "changed": False} - results.diff_current = { - "rma_switches": [pc.seed_ip for pc in proposed_config] - } + results.diff_current = {"rma_switches": [pc.seed_ip for pc in proposed_config]} results.register_api_call() return @@ -2062,9 +1937,7 @@ def handle( # Build and submit each RMA request switch_actions: List[Tuple[str, SwitchConfigModel]] = [] - rma_diff_data: List[Tuple[str, str, SwitchConfigModel]] = ( - [] - ) # (new_serial, old_serial, switch_cfg) + rma_diff_data: List[Tuple[str, str, SwitchConfigModel]] = [] # (new_serial, old_serial, switch_cfg) for switch_cfg, rma_cfg in rma_entries: new_serial = rma_cfg.new_serial_number bootstrap_data = bootstrap_idx.get(new_serial) @@ -2104,9 +1977,7 @@ def handle( self._provision_rma_switch(rma_cfg.old_serial_number, rma_model) switch_actions.append((rma_model.new_switch_id, switch_cfg)) - rma_diff_data.append( - (rma_model.new_switch_id, rma_cfg.old_serial_number, switch_cfg) - ) + rma_diff_data.append((rma_model.new_switch_id, rma_cfg.old_serial_number, switch_cfg)) # Post-processing: wait for RMA switches to become ready, then # save credentials and finalize. RMA switches come up via POAP @@ -2122,11 +1993,7 @@ def handle( ) success = self.wait_utils.wait_for_rma_switch_ready(all_new_serials) if not success: - msg = ( - f"One or more RMA replacement switches failed to become " - f"discoverable in fabric '{self.ctx.fabric}'. " - f"Switches: {all_new_serials}" - ) + msg = f"One or more RMA replacement switches failed to become " f"discoverable in fabric '{self.ctx.fabric}'. 
" f"Switches: {all_new_serials}" log.error(msg) nd.module.fail_json(msg=msg) @@ -2135,10 +2002,7 @@ def handle( try: self.fabric_ops.finalize() except Exception as e: - msg = ( - f"Failed to finalize (config-save/deploy) for RMA " - f"switches {all_new_serials}: {e}" - ) + msg = f"Failed to finalize (config-save/deploy) for RMA " f"switches {all_new_serials}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -2163,9 +2027,7 @@ def _validate_prerequisites( log.debug("ENTER: _validate_prerequisites()") - existing_by_serial: Dict[str, SwitchDataModel] = { - sw.serial_number: sw for sw in existing if sw.serial_number - } + existing_by_serial: Dict[str, SwitchDataModel] = {sw.serial_number: sw for sw in existing if sw.serial_number} result: Dict[str, Dict[str, Any]] = {} @@ -2200,9 +2062,7 @@ def _validate_prerequisites( if ad is None: nd.module.fail_json( msg=( - f"RMA: Switch '{old_serial}' has no additional data " - f"in the inventory response. Cannot verify discovery " - f"status and system mode." + f"RMA: Switch '{old_serial}' has no additional data " f"in the inventory response. Cannot verify discovery " f"status and system mode." ) ) @@ -2280,22 +2140,14 @@ def _build_rma_model( # Bootstrap API response fields public_key = bootstrap_data.get("publicKey", "") - finger_print = bootstrap_data.get( - "fingerPrint", bootstrap_data.get("fingerprint", "") - ) + finger_print = bootstrap_data.get("fingerPrint", bootstrap_data.get("fingerprint", "")) bs_data = bootstrap_data.get("data") or {} # Use user-provided values when available; fall back to bootstrap API data. 
model_name = rma_cfg.model or bootstrap_data.get("model", "") version = rma_cfg.version or bootstrap_data.get("softwareVersion", "") - gateway_ip_mask = ( - (rma_cfg.config_data.gateway if rma_cfg.config_data else None) - or bootstrap_data.get("gatewayIpMask") - or bs_data.get("gatewayIpMask") - ) - data_models = ( - rma_cfg.config_data.models if rma_cfg.config_data else None - ) or bs_data.get("models", []) + gateway_ip_mask = (rma_cfg.config_data.gateway if rma_cfg.config_data else None) or bootstrap_data.get("gatewayIpMask") or bs_data.get("gatewayIpMask") + data_models = (rma_cfg.config_data.models if rma_cfg.config_data else None) or bs_data.get("models", []) rma_model = RMASwitchModel( gatewayIpMask=gateway_ip_mask, @@ -2312,11 +2164,7 @@ def _build_rma_model( newSwitchId=new_switch_id, publicKey=public_key, fingerPrint=finger_print, - data=( - {"gatewayIpMask": gateway_ip_mask, "models": data_models} - if (gateway_ip_mask or data_models) - else None - ), + data=({"gatewayIpMask": gateway_ip_mask, "models": data_models} if (gateway_ip_mask or data_models) else None), ) log.debug("EXIT: _build_rma_model() -> newSwitchId=%s", rma_model.new_switch_id) @@ -2355,10 +2203,7 @@ def _provision_rma_switch( try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) except Exception as e: - msg = ( - f"RMA provision API call failed for " - f"{old_switch_id} → {rma_model.new_switch_id}: {e}" - ) + msg = f"RMA provision API call failed for " f"{old_switch_id} → {rma_model.new_switch_id}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -2376,10 +2221,7 @@ def _provision_rma_switch( results.register_api_call() if not result.get("success"): - msg = ( - f"RMA provision failed for {old_switch_id} → " - f"{rma_model.new_switch_id}: {response}" - ) + msg = f"RMA provision failed for {old_switch_id} → " f"{rma_model.new_switch_id}: {response}" log.error(msg) nd.module.fail_json(msg=msg) @@ -2438,37 +2280,26 @@ def __init__( # Switch collections try: - 
self.proposed: NDConfigCollection = NDConfigCollection( - model_class=SwitchDataModel - ) + self.proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) self.existing: NDConfigCollection = NDConfigCollection.from_api_response( response_data=self._query_all_switches(), model_class=SwitchDataModel, ) self.before: NDConfigCollection = self.existing.copy() - self.sent: NDConfigCollection = NDConfigCollection( - model_class=SwitchDataModel - ) + self.sent: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) except Exception as e: - msg = ( - f"Failed to query fabric '{self.fabric}' inventory " - f"during initialization: {e}" - ) + msg = f"Failed to query fabric '{self.fabric}' inventory " f"during initialization: {e}" log.error(msg) nd.module.fail_json(msg=msg) # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] - self.output: NDOutput = NDOutput( - output_level=self.module.params.get("output_level", "normal") - ) + self.output: NDOutput = NDOutput(output_level=self.module.params.get("output_level", "normal")) self.output.assign(before=self.before, after=self.existing) # Utility instances (SwitchWaitUtils / FabricUtils depend on self) self.fabric_utils = FabricUtils(self.nd, self.fabric, log) - self.wait_utils = SwitchWaitUtils( - self, self.fabric, log, fabric_utils=self.fabric_utils - ) + self.wait_utils = SwitchWaitUtils(self, self.fabric, log, fabric_utils=self.fabric_utils) # Service instances (Dependency Injection) self.discovery = SwitchDiscoveryService(self.ctx) @@ -2496,9 +2327,7 @@ def exit_json(self) -> None: gathered = [] for sw in self.existing: try: - gathered.append( - SwitchConfigModel.from_switch_data(sw).to_gathered_dict() - ) + gathered.append(SwitchConfigModel.from_switch_data(sw).to_gathered_dict()) except (ValueError, Exception) as exc: msg = f"Failed to convert switch {sw.switch_id!r} to gathered format: {exc}" self.log.error(msg) @@ -2539,43 +2368,25 @@ def manage_state(self) -> None: # gathered — 
read-only, no config accepted if self.state == "gathered": if self.config: - self.nd.module.fail_json( - msg="'config' must not be provided for 'gathered' state." - ) + self.nd.module.fail_json(msg="'config' must not be provided for 'gathered' state.") return self._handle_gathered_state() # deleted — config is optional if self.state == "deleted": - proposed_config = ( - SwitchDiffEngine.validate_configs( - self.config, self.state, self.nd, self.log - ) - if self.config - else None - ) + proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) if self.config else None return self._handle_deleted_state(proposed_config) # merged / overridden — config is required if not self.config: - self.nd.module.fail_json( - msg=f"'config' is required for '{self.state}' state." - ) + self.nd.module.fail_json(msg=f"'config' is required for '{self.state}' state.") - proposed_config = SwitchDiffEngine.validate_configs( - self.config, self.state, self.nd, self.log - ) + proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) # Partition configs by operation type - poap_configs = [ - c - for c in proposed_config - if c.operation_type in ("poap", "preprovision", "swap") - ] + poap_configs = [c for c in proposed_config if c.operation_type in ("poap", "preprovision", "swap")] rma_configs = [c for c in proposed_config if c.operation_type == "rma"] normal_configs = [c for c in proposed_config if c.operation_type == "normal"] # Capture all proposed configs for NDOutput - output_proposed: NDConfigCollection = NDConfigCollection( - model_class=SwitchConfigModel - ) + output_proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchConfigModel) for cfg in proposed_config: output_proposed.add(cfg) self.output.assign(proposed=output_proposed) @@ -2589,17 +2400,13 @@ def manage_state(self) -> None: # POAP and RMA are only valid with state=merged if (poap_configs or rma_configs) and self.state != "merged": - 
self.nd.module.fail_json( - msg="POAP and RMA configs are only supported with state=merged" - ) + self.nd.module.fail_json(msg="POAP and RMA configs are only supported with state=merged") # Normal discovery runs first so the fabric inventory is up to date # before POAP/RMA handlers execute. if normal_configs: existing_ips = {sw.fabric_management_ip for sw in self.existing} - configs_to_discover = [ - cfg for cfg in normal_configs if cfg.seed_ip not in existing_ips - ] + configs_to_discover = [cfg for cfg in normal_configs if cfg.seed_ip not in existing_ips] if configs_to_discover: self.log.info( "Discovery needed for %s/%s switch(es) — %s already in fabric", @@ -2609,17 +2416,11 @@ def manage_state(self) -> None: ) discovered_data = self.discovery.discover(configs_to_discover) else: - self.log.info( - "All proposed switches already in fabric — skipping discovery" - ) + self.log.info("All proposed switches already in fabric — skipping discovery") discovered_data = {} - built = self.discovery.build_proposed( - normal_configs, discovered_data, list(self.existing) - ) + built = self.discovery.build_proposed(normal_configs, discovered_data, list(self.existing)) self.proposed = NDConfigCollection(model_class=SwitchDataModel, items=built) - diff = SwitchDiffEngine.compute_changes( - list(self.proposed), list(self.existing), self.log - ) + diff = SwitchDiffEngine.compute_changes(list(self.proposed), list(self.existing), self.log) state_handlers = { "merged": self._handle_merged_state, @@ -2700,9 +2501,7 @@ def _handle_merged_state( self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_add": [sw.fabric_management_ip for sw in switches_to_add], - "migration_mode": [ - sw.fabric_management_ip for sw in migration_switches - ], + "migration_mode": [sw.fabric_management_ip for sw in migration_switches], "save_deploy_required": idempotent_save_req, } self.results.register_api_call() @@ -2743,9 +2542,7 @@ def _handle_merged_state( if 
disc: pairs.append((cfg, disc)) else: - self.log.warning( - "No discovery data for %s, skipping", cfg.seed_ip - ) + self.log.warning("No discovery data for %s, skipping", cfg.seed_ip) if not pairs: continue @@ -2797,10 +2594,7 @@ def _handle_merged_state( # skip the unreachable/reload detection phases. all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) if all_preserve_config: - self.log.info( - "All switches in batch are brownfield (preserve_config=True) — " - "reload detection will be skipped" - ) + self.log.info("All switches in batch are brownfield (preserve_config=True) — reload detection will be skipped") self.fabric_ops.post_add_processing( switch_actions, @@ -2838,11 +2632,7 @@ def _merged_handle_idempotent( for sw in idempotent_switches: existing_sw = existing_by_ip.get(sw.fabric_management_ip) - status = ( - existing_sw.additional_data.config_sync_status - if existing_sw and existing_sw.additional_data - else None - ) + status = existing_sw.additional_data.config_sync_status if existing_sw and existing_sw.additional_data else None if status != ConfigSyncStatus.IN_SYNC: self.log.info( "Switch %s (%s) is config-idempotent but configSyncStatus is '%s' — will run config save and deploy", @@ -2950,12 +2740,7 @@ def _handle_overridden_state( # Phase 2: Switches that need updating (delete-then-re-add) for sw in diff.get("to_update", []): existing_sw = next( - ( - e - for e in self.existing - if e.switch_id == sw.switch_id - or e.fabric_management_ip == sw.fabric_management_ip - ), + (e for e in self.existing if e.switch_id == sw.switch_id or e.fabric_management_ip == sw.fabric_management_ip), None, ) if existing_sw: @@ -2965,9 +2750,7 @@ def _handle_overridden_state( existing_sw.switch_id, ) switches_to_delete.append(existing_sw) - self._log_operation( - "delete_for_update", existing_sw.fabric_management_ip - ) + self._log_operation("delete_for_update", existing_sw.fabric_management_ip) diff["to_add"].append(sw) @@ -2987,9 +2770,7 @@ 
def _handle_overridden_state( # skipped during initial discovery because they were already in the # fabric). update_ips = {sw.fabric_management_ip for sw in switches_to_delete} - configs_needing_rediscovery = [ - cfg for cfg in proposed_config if cfg.seed_ip in update_ips - ] + configs_needing_rediscovery = [cfg for cfg in proposed_config if cfg.seed_ip in update_ips] if configs_needing_rediscovery: self.log.info( "Re-discovering %s switch(es) after deletion for re-add: %s", @@ -3061,15 +2842,9 @@ def _handle_deleted_state( switches_to_delete: List[SwitchDataModel] = [] for switch_config in proposed_config: identifier = switch_config.seed_ip - self.log.debug( - "Looking for switch to delete with seed IP: %s", identifier - ) + self.log.debug("Looking for switch to delete with seed IP: %s", identifier) existing_switch = next( - ( - sw - for sw in self.existing - if sw.fabric_management_ip == identifier - ), + (sw for sw in self.existing if sw.fabric_management_ip == identifier), None, ) if existing_switch: @@ -3089,9 +2864,7 @@ def _handle_deleted_state( # Check mode — preview only if self.nd.module.check_mode: - self.log.info( - "Check mode: would delete %s switch(es)", len(switches_to_delete) - ) + self.log.info("Check mode: would delete %s switch(es)", len(switches_to_delete)) self.results.action = "delete" self.results.state = self.state self.results.operation_type = OperationType.DELETE @@ -3106,9 +2879,7 @@ def _handle_deleted_state( self.results.register_api_call() return - self.log.info( - "Proceeding to delete %s switch(es) from fabric", len(switches_to_delete) - ) + self.log.info("Proceeding to delete %s switch(es) from fabric", len(switches_to_delete)) self.fabric_ops.bulk_delete(switches_to_delete) for sw in switches_to_delete: self.sent.add(sw) diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py index 1ede0946..d7094add 100644 --- a/plugins/module_utils/manage_switches/utils.py +++ 
b/plugins/module_utils/manage_switches/utils.py @@ -139,10 +139,7 @@ def get_switch_field( if name in switch and switch[name] is not None: return switch[name] # Try camelCase variant - camel = "".join( - word.capitalize() if i > 0 else word - for i, word in enumerate(name.split("_")) - ) + camel = "".join(word.capitalize() if i > 0 else word for i, word in enumerate(name.split("_"))) if camel in switch and switch[camel] is not None: return switch[camel] return None @@ -199,20 +196,12 @@ def group_switches_by_credentials( ) groups.setdefault(group_key, []).append(switch) - log.info( - "Grouped %s switches into %s credential group(s)", len(switches), len(groups) - ) + log.info("Grouped %s switches into %s credential group(s)", len(switches), len(groups)) for idx, (key, group_switches) in enumerate(groups.items(), 1): username, _pw_hash, auth_proto, platform_type, preserve_config = key - auth_value = ( - auth_proto.value if hasattr(auth_proto, "value") else str(auth_proto) - ) - platform_value = ( - platform_type.value - if hasattr(platform_type, "value") - else str(platform_type) - ) + auth_value = auth_proto.value if hasattr(auth_proto, "value") else str(auth_proto) + platform_value = platform_type.value if hasattr(platform_type, "value") else str(platform_type) log.debug( "Group %s: %s switches with username=%s, auth=%s, platform=%s, preserve_config=%s", idx, @@ -285,10 +274,7 @@ def build_bootstrap_index( Returns: Dict mapping ``serial_number`` -> switch dict. 
""" - return { - sw.get("serialNumber", sw.get("serial_number", "")): sw - for sw in bootstrap_switches - } + return {sw.get("serialNumber", sw.get("serial_number", "")): sw for sw in bootstrap_switches} def build_poap_data_block(poap_cfg) -> Optional[Dict[str, Any]]: @@ -436,10 +422,7 @@ def wait_for_switch_manageable( # Phase 3: brownfield shortcut — no reload expected if all_preserve_config: - self.log.info( - "All switches are brownfield (preserve_config=True) — " - "skipping reload detection (phases 5-6)" - ) + self.log.info("All switches are brownfield (preserve_config=True) — skipping reload detection (phases 5-6)") return True # Phase 4: greenfield shortcut (skipped for POAP bootstrap) @@ -448,10 +431,7 @@ def wait_for_switch_manageable( return True if skip_greenfield_check: - self.log.info( - "Greenfield debug check skipped " - "(POAP bootstrap — device always reboots)" - ) + self.log.info("Greenfield debug check skipped (POAP bootstrap — device always reboots)") # Phase 5: wait for "unreachable" (switch is reloading) if not self._wait_for_discovery_state(serial_numbers, "unreachable"): @@ -525,9 +505,7 @@ def wait_for_discovery( self.log.error("Discovery failed for %s: %s", seed_ip, status) return None - self.log.debug( - "Discovery attempt %s/%s for %s", attempt + 1, attempts, seed_ip - ) + self.log.debug("Discovery attempt %s/%s for %s", attempt + 1, attempts, seed_ip) time.sleep(interval) self.log.warning("Discovery timeout for %s", seed_ip) @@ -565,9 +543,7 @@ def _wait_for_system_mode(self, serial_numbers: List[str]) -> bool: if pending is None: return False - self.log.info( - "All switches in normal system mode — proceeding to discovery checks" - ) + self.log.info("All switches in normal system mode — proceeding to discovery checks") return True def _poll_system_mode( @@ -599,9 +575,7 @@ def _poll_system_mode( if switch_data is None: return None - remaining = self._filter_by_system_mode( - pending, switch_data, target_mode, expect_match - ) + 
remaining = self._filter_by_system_mode(pending, switch_data, target_mode, expect_match) if not remaining: self.log.info("All switches %s mode (attempt %s)", label, attempt) @@ -655,9 +629,7 @@ def _filter_by_system_mode( mode = sw.get("additionalData", {}).get("systemMode", "").lower() # expect_match=True: "still in target_mode" → not done # expect_match=False: "not yet in target_mode" → not done - still_waiting = ( - (mode == target_mode) if expect_match else (mode != target_mode) - ) + still_waiting = (mode == target_mode) if expect_match else (mode != target_mode) if still_waiting: remaining.append(sn) return remaining @@ -723,9 +695,7 @@ def _wait_for_discovery_state( if switch_data is None: return False - pending = self._filter_by_discovery_status( - pending, switch_data, target_state - ) + pending = self._filter_by_discovery_status(pending, switch_data, target_state) if not pending: self.log.info( @@ -746,9 +716,7 @@ def _wait_for_discovery_state( ) time.sleep(self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR) - self.log.warning( - "Timeout waiting for '%s' state: %s", target_state, serial_numbers - ) + self.log.warning("Timeout waiting for '%s' state: %s", target_state, serial_numbers) return False # ===================================================================== @@ -808,9 +776,7 @@ def _wait_for_switches_in_fabric( ) time.sleep(self.wait_interval) - self.log.warning( - "Timeout waiting for switches to appear in fabric: %s", pending - ) + self.log.warning("Timeout waiting for switches to appear in fabric: %s", pending) return False def _fetch_switch_data( @@ -894,12 +860,8 @@ def _is_greenfield_debug_enabled(self) -> bool: try: fabric_info = self.fabric_utils.get_fabric_info() - self.log.debug( - "Fabric info retrieved for greenfield check: %s", fabric_info - ) - flag = ( - fabric_info.get("management", {}).get("greenfieldDebugFlag", "").lower() - ) + self.log.debug("Fabric info retrieved for greenfield check: %s", fabric_info) + flag = 
fabric_info.get("management", {}).get("greenfieldDebugFlag", "").lower() self.log.debug("Greenfield debug flag value: '%s'", flag) self._greenfield_debug_enabled = flag == "enable" except Exception as e: diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index e65f0d2f..68931315 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -42,12 +42,8 @@ class BootstrapBaseData(NDNestedModel): """ identifiers: ClassVar[List[str]] = [] - gateway_ip_mask: Optional[str] = Field( - default=None, alias="gatewayIpMask", description="Gateway IP address with mask" - ) - models: Optional[List[str]] = Field( - default=None, description="Supported models for switch" - ) + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", description="Gateway IP address with mask") + models: Optional[List[str]] = Field(default=None, description="Supported models for switch") @field_validator("gateway_ip_mask", mode="before") @classmethod @@ -61,12 +57,8 @@ class BootstrapBaseModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" - gateway_ip_mask: str = Field( - ..., alias="gatewayIpMask", description="Gateway IP address with mask" - ) + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + gateway_ip_mask: str = Field(..., alias="gatewayIpMask", description="Gateway IP address with mask") model: str = Field(..., description="Model of the bootstrap switch") software_version: str = Field( ..., @@ -79,9 +71,7 @@ class BootstrapBaseModel(NDBaseModel): description="Image policy associated with the switch during bootstrap", ) switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") - 
data: Optional[BootstrapBaseData] = Field( - default=None, description="Additional bootstrap data" - ) + data: Optional[BootstrapBaseData] = Field(default=None, description="Additional bootstrap data") @field_validator("gateway_ip_mask", mode="before") @classmethod @@ -101,16 +91,10 @@ class BootstrapCredentialModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] - password: str = Field( - ..., description="Switch password to be set during bootstrap for admin user" - ) - discovery_auth_protocol: SnmpV3AuthProtocol = Field( - ..., alias="discoveryAuthProtocol" - ) + password: str = Field(..., description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(..., alias="discoveryAuthProtocol") use_new_credentials: bool = Field( default=False, alias="useNewCredentials", @@ -143,15 +127,11 @@ def validate_credentials(self) -> "BootstrapCredentialModel": if self.use_new_credentials: if self.remote_credential_store == RemoteCredentialStore.CYBERARK: if not self.remote_credential_store_key: - raise ValueError( - "remote_credential_store_key is required when " - "remote_credential_store is 'cyberark'" - ) + raise ValueError("remote_credential_store_key is required when remote_credential_store is 'cyberark'") elif self.remote_credential_store == RemoteCredentialStore.LOCAL: if not self.discovery_username or not self.discovery_password: raise ValueError( - "discovery_username and discovery_password are required when " - "remote_credential_store is 'local' and use_new_credentials is True" + "discovery_username and discovery_password are required when remote_credential_store is 'local' and 
use_new_credentials is True" ) return self @@ -162,14 +142,10 @@ class BootstrapImportSpecificModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" hostname: str = Field(..., description="Hostname of the bootstrap switch") ip: str = Field(..., description="IP address of the bootstrap switch") - serial_number: str = Field( - ..., alias="serialNumber", description="Serial number of the bootstrap switch" - ) + serial_number: str = Field(..., alias="serialNumber", description="Serial number of the bootstrap switch") in_inventory: bool = Field( ..., alias="inInventory", @@ -182,9 +158,7 @@ class BootstrapImportSpecificModel(NDBaseModel): alias="dhcpBootstrapIp", description="This is used for device day-0 bring-up when using inband reachability", ) - seed_switch: bool = Field( - default=False, alias="seedSwitch", description="Use as seed switch" - ) + seed_switch: bool = Field(default=False, alias="seedSwitch", description="Use as seed switch") @field_validator("hostname", mode="before") @classmethod @@ -218,14 +192,10 @@ class BootstrapImportSwitchModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["serial_number"] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "single" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] - serial_number: str = Field( - ..., alias="serialNumber", description="Serial number of the bootstrap switch" - ) + serial_number: str = Field(..., alias="serialNumber", description="Serial number of the bootstrap switch") model: str = Field(..., description="Model of the bootstrap switch") 
software_version: str = Field( ..., @@ -234,12 +204,8 @@ class BootstrapImportSwitchModel(NDBaseModel): ) hostname: str = Field(..., description="Hostname of the bootstrap switch") ip: str = Field(..., description="IP address of the bootstrap switch") - password: str = Field( - ..., description="Switch password to be set during bootstrap for admin user" - ) - discovery_auth_protocol: SnmpV3AuthProtocol = Field( - ..., alias="discoveryAuthProtocol" - ) + password: str = Field(..., description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(..., alias="discoveryAuthProtocol") discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") remote_credential_store: RemoteCredentialStore = Field( @@ -278,9 +244,7 @@ class BootstrapImportSwitchModel(NDBaseModel): description="Image policy associated with the switch during bootstrap", ) switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") - gateway_ip_mask: str = Field( - ..., alias="gatewayIpMask", description="Gateway IP address with mask" - ) + gateway_ip_mask: str = Field(..., alias="gatewayIpMask", description="Gateway IP address with mask") @field_validator("ip", mode="before") @classmethod @@ -330,12 +294,8 @@ class ImportBootstrapSwitchesRequestModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" - switches: List[BootstrapImportSwitchModel] = Field( - ..., description="PowerOn Auto Provisioning switches" - ) + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + switches: List[BootstrapImportSwitchModel] = Field(..., description="PowerOn Auto Provisioning switches") def to_payload(self) -> Dict[str, Any]: """Convert to API payload 
format.""" diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 87a21f98..dc45c2d2 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -56,31 +56,18 @@ class ConfigDataModel(NDNestedModel): min_length=1, description="List of model of modules in switch to Bootstrap/Pre-provision/RMA", ) - gateway: str = Field( - ..., description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)" - ) + gateway: str = Field(..., description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)") @field_validator("models", mode="before") @classmethod def validate_models_list(cls, v: Any) -> List[str]: """Validate models is a non-empty list of strings.""" if v is None: - raise ValueError( - "'models' is required in config_data. " - "Provide a list of module model strings, " - "e.g. models: [N9K-X9364v, N9K-vSUP]" - ) + raise ValueError("'models' is required in config_data. Provide a list of module model strings, e.g. models: [N9K-X9364v, N9K-vSUP]") if not isinstance(v, list): - raise ValueError( - f"'models' must be a list of module model strings, got: {type(v).__name__}. " - f"e.g. models: [N9K-X9364v, N9K-vSUP]" - ) + raise ValueError(f"'models' must be a list of module model strings, got: {type(v).__name__}. e.g. models: [N9K-X9364v, N9K-vSUP]") if len(v) == 0: - raise ValueError( - "'models' list cannot be empty. " - "Provide at least one module model string, " - "e.g. models: [N9K-X9364v, N9K-vSUP]" - ) + raise ValueError("'models' list cannot be empty. Provide at least one module model string, e.g. 
models: [N9K-X9364v, N9K-vSUP]") return v @field_validator("gateway", mode="before") @@ -136,13 +123,9 @@ def validate_discovery_credentials_pair(self) -> "POAPConfigModel": has_user = bool(self.discovery_username) has_pass = bool(self.discovery_password) if has_user and not has_pass: - raise ValueError( - "discovery_password must be set when discovery_username is specified" - ) + raise ValueError("discovery_password must be set when discovery_username is specified") if has_pass and not has_user: - raise ValueError( - "discovery_username must be set when discovery_password is specified" - ) + raise ValueError("discovery_username must be set when discovery_password is specified") return self @field_validator("serial_number", mode="before") @@ -173,22 +156,13 @@ class PreprovisionConfigModel(NDNestedModel): min_length=1, description="Serial number of switch to Pre-provision", ) - model: str = Field( - ..., min_length=1, description="Model of switch to Pre-provision" - ) - version: str = Field( - ..., min_length=1, description="Software version of switch to Pre-provision" - ) - hostname: str = Field( - ..., description="Hostname for the switch during pre-provision" - ) + model: str = Field(..., min_length=1, description="Model of switch to Pre-provision") + version: str = Field(..., min_length=1, description="Software version of switch to Pre-provision") + hostname: str = Field(..., description="Hostname for the switch during pre-provision") config_data: ConfigDataModel = Field( ..., alias="configData", - description=( - "Basic config data of switch to Pre-provision. " - "'models' (list of module models) and 'gateway' (IP with mask) are mandatory." - ), + description=("Basic config data of switch to Pre-provision. 
" "'models' (list of module models) and 'gateway' (IP with mask) are mandatory."), ) # Optional @@ -214,13 +188,9 @@ def validate_discovery_credentials_pair(self) -> "PreprovisionConfigModel": has_user = bool(self.discovery_username) has_pass = bool(self.discovery_password) if has_user and not has_pass: - raise ValueError( - "discovery_password must be set when discovery_username is specified" - ) + raise ValueError("discovery_password must be set when discovery_username is specified") if has_pass and not has_user: - raise ValueError( - "discovery_username must be set when discovery_password is specified" - ) + raise ValueError("discovery_username must be set when discovery_password is specified") return self @field_validator("serial_number", mode="before") @@ -317,13 +287,9 @@ def validate_discovery_credentials_pair(self) -> "RMAConfigModel": has_user = bool(self.discovery_username) has_pass = bool(self.discovery_password) if has_user and not has_pass: - raise ValueError( - "discovery_password must be set when discovery_username is specified" - ) + raise ValueError("discovery_password must be set when discovery_username is specified") if has_pass and not has_user: - raise ValueError( - "discovery_username must be set when discovery_password is specified" - ) + raise ValueError("discovery_username must be set when discovery_password is specified") return self @@ -446,9 +412,7 @@ def to_config_dict(self) -> Dict[str, Any]: "discovery_username": True, "discovery_password": True, }, - "rma": { - "__all__": {"discovery_username": True, "discovery_password": True} - }, + "rma": {"__all__": {"discovery_username": True, "discovery_password": True}}, } ) @@ -460,9 +424,7 @@ def reject_auth_proto_for_special_ops(self) -> "SwitchConfigModel": all inputs have already been coerced by Pydantic into a typed SnmpV3AuthProtocol value, so a direct enum comparison is safe. 
""" - if ( - self.poap or self.preprovision or self.rma - ) and self.auth_proto != SnmpV3AuthProtocol.MD5: + if (self.poap or self.preprovision or self.rma) and self.auth_proto != SnmpV3AuthProtocol.MD5: if self.poap or self.preprovision: op = "POAP/Pre-provision" else: @@ -487,10 +449,7 @@ def validate_special_ops_exclusion(self) -> "SwitchConfigModel": - rma combined with poap or preprovision """ if self.rma and (self.poap or self.preprovision): - raise ValueError( - "Cannot specify 'rma' together with 'poap' or 'preprovision' " - "for the same switch" - ) + raise ValueError("Cannot specify 'rma' together with 'poap' or 'preprovision' for the same switch") return self @model_validator(mode="after") @@ -498,13 +457,9 @@ def validate_special_ops_credentials(self) -> "SwitchConfigModel": """Validate credentials for POAP, Pre-provision, Swap and RMA operations.""" if self.poap or self.preprovision or self.rma: if not self.username or not self.password: - raise ValueError( - "For POAP, Pre-provision, and RMA operations, username and password are required" - ) + raise ValueError("For POAP, Pre-provision, and RMA operations, username and password are required") if self.username != "admin": - raise ValueError( - "For POAP, Pre-provision, and RMA operations, username should be 'admin'" - ) + raise ValueError("For POAP, Pre-provision, and RMA operations, username should be 'admin'") return self @model_validator(mode="after") @@ -522,27 +477,17 @@ def apply_state_defaults(self, info: ValidationInfo) -> "SwitchConfigModel": # POAP/Pre-provision/Swap only allowed with merged if (self.poap or self.preprovision) and state not in (None, "merged"): - raise ValueError( - f"POAP/Pre-provision operations require 'merged' state, " - f"got '{state}' (switch: {self.seed_ip})" - ) + raise ValueError(f"POAP/Pre-provision operations require 'merged' state, " f"got '{state}' (switch: {self.seed_ip})") # RMA only allowed with merged if self.rma and state not in (None, "merged"): - raise 
ValueError( - f"RMA operations require 'merged' state, " - f"got '{state}' (switch: {self.seed_ip})" - ) + raise ValueError(f"RMA operations require 'merged' state, " f"got '{state}' (switch: {self.seed_ip})") if state in ("merged", "overridden"): if self.role is None: self.role = SwitchRole.LEAF if not self.username or not self.password: - raise ValueError( - f"username and password are required " - f"for '{state}' state " - f"(switch: {self.seed_ip})" - ) + raise ValueError(f"username and password are required " f"for '{state}' state " f"(switch: {self.seed_ip})") return self @field_validator("seed_ip", mode="before") @@ -576,9 +521,7 @@ def validate_seed_ip(cls, v: str) -> str: except socket.gaierror: continue - raise ValueError( - f"'{v}' is not a valid IP address and could not be resolved via DNS" - ) + raise ValueError(f"'{v}' is not a valid IP address and could not be resolved via DNS") @field_validator("rma", mode="before") @classmethod @@ -590,9 +533,7 @@ def validate_rma_list_not_empty(cls, v: Optional[List]) -> Optional[List]: @field_validator("auth_proto", mode="before") @classmethod - def normalize_auth_proto( - cls, v: Union[str, SnmpV3AuthProtocol, None] - ) -> SnmpV3AuthProtocol: + def normalize_auth_proto(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: """Normalize auth_proto to handle case-insensitive input (MD5, md5, etc.).""" return SnmpV3AuthProtocol.normalize(v) @@ -637,16 +578,9 @@ def from_switch_data(cls, sw: Any) -> "SwitchConfigModel": making it impossible to construct a valid config entry. """ if not sw.fabric_management_ip: - raise ValueError( - f"Switch {sw.switch_id!r} has no fabric_management_ip — " - "cannot build a gathered config entry without a seed IP." 
- ) + raise ValueError(f"Switch {sw.switch_id!r} has no fabric_management_ip — " "cannot build a gathered config entry without a seed IP.") - platform_type = ( - sw.additional_data.platform_type - if sw.additional_data and hasattr(sw.additional_data, "platform_type") - else None - ) + platform_type = sw.additional_data.platform_type if sw.additional_data and hasattr(sw.additional_data, "platform_type") else None data: Dict[str, Any] = {"seed_ip": sw.fabric_management_ip} if sw.switch_role is not None: diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py index 2092bd5d..6cf95066 100644 --- a/plugins/module_utils/models/manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -41,9 +41,7 @@ class ShallowDiscoveryRequestModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password"] seed_ip_collection: List[str] = Field( ..., @@ -62,12 +60,8 @@ class ShallowDiscoveryRequestModel(NDBaseModel): alias="snmpV3AuthProtocol", description="SNMPv3 authentication protocols", ) - username: Optional[str] = Field( - default=None, description="User name for switch login" - ) - password: Optional[str] = Field( - default=None, description="User password for switch login" - ) + username: Optional[str] = Field(default=None, description="User name for switch login") + password: Optional[str] = Field(default=None, description="User password for switch login") remote_credential_store: Optional[RemoteCredentialStore] = Field( default=None, alias="remoteCredentialStore", @@ -96,17 +90,13 @@ def validate_seed_ips(cls, v: List[str]) -> List[str]: 
@field_validator("snmp_v3_auth_protocol", mode="before") @classmethod - def normalize_snmp_auth( - cls, v: Union[str, SnmpV3AuthProtocol, None] - ) -> SnmpV3AuthProtocol: + def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: """Normalize SNMP auth protocol (case-insensitive).""" return SnmpV3AuthProtocol.normalize(v) @field_validator("platform_type", mode="before") @classmethod - def normalize_platform( - cls, v: Union[str, ShallowDiscoveryPlatformType, None] - ) -> ShallowDiscoveryPlatformType: + def normalize_platform(cls, v: Union[str, ShallowDiscoveryPlatformType, None]) -> ShallowDiscoveryPlatformType: """Normalize platform type (case-insensitive).""" return ShallowDiscoveryPlatformType.normalize(v) @@ -119,18 +109,12 @@ class SwitchDiscoveryModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["serial_number"] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "single" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" hostname: str = Field(..., description="Switch host name") ip: str = Field(..., description="Switch IPv4/v6 address") - serial_number: str = Field( - ..., alias="serialNumber", description="Switch serial number" - ) + serial_number: str = Field(..., alias="serialNumber", description="Switch serial number") model: str = Field(..., description="Switch model") - software_version: Optional[str] = Field( - default=None, alias="softwareVersion", description="Switch software version" - ) + software_version: Optional[str] = Field(default=None, alias="softwareVersion", description="Switch software version") vdc_id: Optional[int] = Field( default=None, alias="vdcId", @@ -142,9 +126,7 @@ class SwitchDiscoveryModel(NDBaseModel): alias="vdcMac", description="N7K VDC Mac address. 
Mandatory for N7K switch discovery", ) - switch_role: Optional[SwitchRole] = Field( - default=None, alias="switchRole", description="Switch role" - ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole", description="Switch role") @field_validator("hostname", mode="before") @classmethod @@ -184,13 +166,9 @@ class AddSwitchesRequestModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password"] - switches: List[SwitchDiscoveryModel] = Field( - ..., min_length=1, description="The list of switches to be imported" - ) + switches: List[SwitchDiscoveryModel] = Field(..., min_length=1, description="The list of switches to be imported") platform_type: PlatformType = Field( default=PlatformType.NX_OS, alias="platformType", @@ -211,12 +189,8 @@ class AddSwitchesRequestModel(NDBaseModel): alias="useCredentialForWrite", description="Flag to use the discovery credential as LAN credential", ) - username: Optional[str] = Field( - default=None, description="User name for switch login" - ) - password: Optional[str] = Field( - default=None, description="User password for switch login" - ) + username: Optional[str] = Field(default=None, description="User name for switch login") + password: Optional[str] = Field(default=None, description="User password for switch login") remote_credential_store: Optional[RemoteCredentialStore] = Field( default=None, alias="remoteCredentialStore", @@ -233,16 +207,12 @@ def to_payload(self) -> Dict[str, Any]: payload = self.model_dump(by_alias=True, exclude_none=True) # Convert nested switches to payload format if "switches" in payload: - payload["switches"] = [ - s.to_payload() if hasattr(s, "to_payload") else s for s in self.switches - ] + 
payload["switches"] = [s.to_payload() if hasattr(s, "to_payload") else s for s in self.switches] return payload @field_validator("snmp_v3_auth_protocol", mode="before") @classmethod - def normalize_snmp_auth( - cls, v: Union[str, SnmpV3AuthProtocol, None] - ) -> SnmpV3AuthProtocol: + def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: """Normalize SNMP auth protocol (case-insensitive: MD5, md5, etc.).""" return SnmpV3AuthProtocol.normalize(v) diff --git a/plugins/module_utils/models/manage_switches/enums.py b/plugins/module_utils/models/manage_switches/enums.py index a79a0ec8..23a54d01 100644 --- a/plugins/module_utils/models/manage_switches/enums.py +++ b/plugins/module_utils/models/manage_switches/enums.py @@ -71,9 +71,7 @@ def from_user_input(cls, value: str) -> "SwitchRole": try: return cls(camel_case) except ValueError: - raise ValueError( - f"Invalid switch role: {value}. Valid options: {cls.choices()}" - ) + raise ValueError(f"Invalid switch role: {value}. Valid options: {cls.choices()}") @classmethod def normalize(cls, value: Union[str, "SwitchRole", None]) -> "SwitchRole": @@ -178,9 +176,7 @@ def choices(cls) -> List[str]: return [e.value for e in cls] @classmethod - def normalize( - cls, value: Union[str, "ShallowDiscoveryPlatformType", None] - ) -> "ShallowDiscoveryPlatformType": + def normalize(cls, value: Union[str, "ShallowDiscoveryPlatformType", None]) -> "ShallowDiscoveryPlatformType": """ Normalize input to enum value (case-insensitive). Accepts: NX_OS, nx-os, NX-OS, ios_xe, ios-xe, etc. @@ -194,9 +190,7 @@ def normalize( for pt in cls: if pt.value == v_normalized: return pt - raise ValueError( - f"Invalid ShallowDiscoveryPlatformType: {value}. Valid: {cls.choices()}" - ) + raise ValueError(f"Invalid ShallowDiscoveryPlatformType: {value}. 
Valid: {cls.choices()}") class SnmpV3AuthProtocol(str, Enum): @@ -231,9 +225,7 @@ def choices(cls) -> List[str]: return [e.value for e in cls] @classmethod - def normalize( - cls, value: Union[str, "SnmpV3AuthProtocol", None] - ) -> "SnmpV3AuthProtocol": + def normalize(cls, value: Union[str, "SnmpV3AuthProtocol", None]) -> "SnmpV3AuthProtocol": """ Normalize input to enum value (case-insensitive). Accepts: MD5, md5, MD5_DES, md5-des, etc. diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py index 64986376..ce5f4aae 100644 --- a/plugins/module_utils/models/manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -41,9 +41,7 @@ class PreProvisionSwitchModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["serial_number"] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "single" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] # --- preProvisionSpecific fields (required) --- @@ -170,9 +168,7 @@ def validate_serial(cls, v: str) -> str: @classmethod def validate_gateway(cls, v: str) -> str: if not v or "/" not in v: - raise ValueError( - "gatewayIpMask must include subnet mask (e.g., 10.23.244.1/24)" - ) + raise ValueError("gatewayIpMask must include subnet mask (e.g., 10.23.244.1/24)") try: ip_network(v, strict=False) except Exception as exc: @@ -203,9 +199,7 @@ class PreProvisionSwitchesRequestModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" switches: 
List[PreProvisionSwitchModel] = Field( ..., description="PowerOn Auto Provisioning switches", diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py index c699ace0..88c00571 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -41,14 +41,10 @@ class RMASwitchModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["new_switch_id"] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "single" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] # From bootstrapBase - gateway_ip_mask: str = Field( - ..., alias="gatewayIpMask", description="Gateway IP address with mask" - ) + gateway_ip_mask: str = Field(..., alias="gatewayIpMask", description="Gateway IP address with mask") model: str = Field(..., description="Model of the bootstrap switch") software_version: str = Field( ..., @@ -63,27 +59,17 @@ class RMASwitchModel(NDBaseModel): switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") # From bootstrapCredential - password: str = Field( - ..., description="Switch password to be set during bootstrap for admin user" - ) - discovery_auth_protocol: SnmpV3AuthProtocol = Field( - ..., alias="discoveryAuthProtocol" - ) + password: str = Field(..., description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(..., alias="discoveryAuthProtocol") discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") - remote_credential_store: RemoteCredentialStore = Field( - default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore" - ) - 
remote_credential_store_key: Optional[str] = Field( - default=None, alias="remoteCredentialStoreKey" - ) + remote_credential_store: RemoteCredentialStore = Field(default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore") + remote_credential_store_key: Optional[str] = Field(default=None, alias="remoteCredentialStoreKey") # From RMASpecific hostname: str = Field(..., description="Hostname of the switch") ip: str = Field(..., description="IP address of the switch") - new_switch_id: str = Field( - ..., alias="newSwitchId", description="SwitchId (serial number) of the switch" - ) + new_switch_id: str = Field(..., alias="newSwitchId", description="SwitchId (serial number) of the switch") public_key: str = Field(..., alias="publicKey", description="Public Key") finger_print: str = Field(..., alias="fingerPrint", description="Fingerprint") dhcp_bootstrap_ip: Optional[str] = Field(default=None, alias="dhcpBootstrapIp") @@ -139,15 +125,11 @@ def validate_rma_credentials(self) -> "RMASwitchModel": if self.use_new_credentials: if self.remote_credential_store == RemoteCredentialStore.CYBERARK: if not self.remote_credential_store_key: - raise ValueError( - "remote_credential_store_key is required when " - "remote_credential_store is 'cyberark'" - ) + raise ValueError("remote_credential_store_key is required when remote_credential_store is 'cyberark'") elif self.remote_credential_store == RemoteCredentialStore.LOCAL: if not self.discovery_username or not self.discovery_password: raise ValueError( - "discovery_username and discovery_password are required when " - "remote_credential_store is 'local' and use_new_credentials is True" + "discovery_username and discovery_password are required when remote_credential_store is 'local' and use_new_credentials is True" ) return self diff --git a/plugins/module_utils/models/manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py index 07f38e4b..0c79f988 100644 --- 
a/plugins/module_utils/models/manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -36,9 +36,7 @@ class SwitchCredentialsRequestModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "singleton" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" switch_ids: List[str] = Field( ..., @@ -46,12 +44,8 @@ class SwitchCredentialsRequestModel(NDBaseModel): min_length=1, description="List of switch serial numbers", ) - switch_username: Optional[str] = Field( - default=None, alias="switchUsername", description="Switch username" - ) - switch_password: Optional[str] = Field( - default=None, alias="switchPassword", description="Switch password" - ) + switch_username: Optional[str] = Field(default=None, alias="switchUsername", description="Switch username") + switch_password: Optional[str] = Field(default=None, alias="switchPassword", description="Switch password") remote_credential_store_key: Optional[str] = Field( default=None, alias="remoteCredentialStoreKey", @@ -81,13 +75,8 @@ def validate_switch_ids(cls, v: List[str]) -> List[str]: @model_validator(mode="after") def validate_credentials(self) -> "SwitchCredentialsRequestModel": """Ensure either local or remote credentials are provided.""" - has_local = ( - self.switch_username is not None and self.switch_password is not None - ) - has_remote = ( - self.remote_credential_store_key is not None - and self.remote_credential_store_type is not None - ) + has_local = self.switch_username is not None and self.switch_password is not None + has_remote = self.remote_credential_store_key is not None and self.remote_credential_store_type is not None if not has_local and not has_remote: raise ValueError( "Either local credentials (switchUsername + switchPassword) " @@ -104,9 +93,7 @@ class 
ChangeSwitchSerialNumberRequestModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["new_switch_id"] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "single" + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" new_switch_id: str = Field(..., alias="newSwitchId", description="New switchId") @field_validator("new_switch_id", mode="before") diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index 759f131c..4d3db2c4 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -44,12 +44,8 @@ class TelemetryIpCollection(NDNestedModel): """ identifiers: ClassVar[List[str]] = [] - inband_ipv4_address: Optional[str] = Field( - default=None, alias="inbandIpV4Address", description="Inband IPv4 address" - ) - inband_ipv6_address: Optional[str] = Field( - default=None, alias="inbandIpV6Address", description="Inband IPv6 address" - ) + inband_ipv4_address: Optional[str] = Field(default=None, alias="inbandIpV4Address", description="Inband IPv4 address") + inband_ipv6_address: Optional[str] = Field(default=None, alias="inbandIpV6Address", description="Inband IPv6 address") out_of_band_ipv4_address: Optional[str] = Field( default=None, alias="outOfBandIpV4Address", @@ -73,12 +69,8 @@ class VpcData(NDNestedModel): """ identifiers: ClassVar[List[str]] = [] - vpc_domain: int = Field( - ..., alias="vpcDomain", ge=1, le=1000, description="vPC domain ID" - ) - peer_switch_id: str = Field( - ..., alias="peerSwitchId", description="vPC peer switch serial number" - ) + vpc_domain: int = Field(..., alias="vpcDomain", ge=1, le=1000, description="vPC domain ID") + peer_switch_id: str = Field(..., alias="peerSwitchId", description="vPC peer switch serial number") 
consistent_status: Optional[bool] = Field( default=None, alias="consistentStatus", @@ -89,18 +81,10 @@ class VpcData(NDNestedModel): alias="intendedPeerName", description="Intended vPC host name for pre-provisioned peer switch", ) - keep_alive_status: Optional[str] = Field( - default=None, alias="keepAliveStatus", description="vPC peer keep alive status" - ) - peer_link_status: Optional[str] = Field( - default=None, alias="peerLinkStatus", description="vPC peer link status" - ) - peer_name: Optional[str] = Field( - default=None, alias="peerName", description="vPC peer switch name" - ) - vpc_role: Optional[VpcRole] = Field( - default=None, alias="vpcRole", description="The vPC role" - ) + keep_alive_status: Optional[str] = Field(default=None, alias="keepAliveStatus", description="vPC peer keep alive status") + peer_link_status: Optional[str] = Field(default=None, alias="peerLinkStatus", description="vPC peer link status") + peer_name: Optional[str] = Field(default=None, alias="peerName", description="vPC peer switch name") + vpc_role: Optional[VpcRole] = Field(default=None, alias="vpcRole", description="The vPC role") @field_validator("peer_switch_id", mode="before") @classmethod @@ -117,12 +101,8 @@ class SwitchMetadata(NDNestedModel): """ identifiers: ClassVar[List[str]] = [] - switch_db_id: Optional[int] = Field( - default=None, alias="switchDbId", description="Database Id of the switch" - ) - switch_uuid: Optional[str] = Field( - default=None, alias="switchUuid", description="Internal unique Id of the switch" - ) + switch_db_id: Optional[int] = Field(default=None, alias="switchDbId", description="Database Id of the switch") + switch_uuid: Optional[str] = Field(default=None, alias="switchUuid", description="Internal unique Id of the switch") class AdditionalSwitchData(NDNestedModel): @@ -131,18 +111,10 @@ class AdditionalSwitchData(NDNestedModel): """ identifiers: ClassVar[List[str]] = [] - usage: Optional[str] = Field( - default="others", description="The usage 
of additional data" - ) - config_sync_status: Optional[ConfigSyncStatus] = Field( - default=None, alias="configSyncStatus", description="Configuration sync status" - ) - discovery_status: Optional[DiscoveryStatus] = Field( - default=None, alias="discoveryStatus", description="Discovery status" - ) - domain_name: Optional[str] = Field( - default=None, alias="domainName", description="Domain name" - ) + usage: Optional[str] = Field(default="others", description="The usage of additional data") + config_sync_status: Optional[ConfigSyncStatus] = Field(default=None, alias="configSyncStatus", description="Configuration sync status") + discovery_status: Optional[DiscoveryStatus] = Field(default=None, alias="discoveryStatus", description="Discovery status") + domain_name: Optional[str] = Field(default=None, alias="domainName", description="Domain name") smart_switch: Optional[bool] = Field( default=None, alias="smartSwitch", @@ -153,9 +125,7 @@ class AdditionalSwitchData(NDNestedModel): alias="hypershieldConnectivityStatus", description="Smart switch connectivity status to hypershield controller", ) - hypershield_tenant: Optional[str] = Field( - default=None, alias="hypershieldTenant", description="Hypershield tenant name" - ) + hypershield_tenant: Optional[str] = Field(default=None, alias="hypershieldTenant", description="Hypershield tenant name") hypershield_integration_name: Optional[str] = Field( default=None, alias="hypershieldIntegrationName", @@ -171,26 +141,14 @@ class AdditionalSwitchData(NDNestedModel): alias="sourceVrfName", description="Source VRF for switch discovery", ) - platform_type: Optional[PlatformType] = Field( - default=None, alias="platformType", description="Platform type of the switch" - ) - discovered_system_mode: Optional[SystemMode] = Field( - default=None, alias="discoveredSystemMode", description="Discovered system mode" - ) - intended_system_mode: Optional[SystemMode] = Field( - default=None, alias="intendedSystemMode", description="Intended 
system mode" - ) - scalable_unit: Optional[str] = Field( - default=None, alias="scalableUnit", description="Name of the scalable unit" - ) - system_mode: Optional[SystemMode] = Field( - default=None, alias="systemMode", description="System mode" - ) + platform_type: Optional[PlatformType] = Field(default=None, alias="platformType", description="Platform type of the switch") + discovered_system_mode: Optional[SystemMode] = Field(default=None, alias="discoveredSystemMode", description="Discovered system mode") + intended_system_mode: Optional[SystemMode] = Field(default=None, alias="intendedSystemMode", description="Intended system mode") + scalable_unit: Optional[str] = Field(default=None, alias="scalableUnit", description="Name of the scalable unit") + system_mode: Optional[SystemMode] = Field(default=None, alias="systemMode", description="System mode") vendor: Optional[str] = Field(default=None, description="Vendor of the switch") username: Optional[str] = Field(default=None, description="Discovery user name") - remote_credential_store: Optional[RemoteCredentialStore] = Field( - default=None, alias="remoteCredentialStore" - ) + remote_credential_store: Optional[RemoteCredentialStore] = Field(default=None, alias="remoteCredentialStore") meta: Optional[SwitchMetadata] = Field(default=None, description="Switch metadata") @@ -200,12 +158,8 @@ class AdditionalAciSwitchData(NDNestedModel): """ identifiers: ClassVar[List[str]] = [] - usage: Optional[str] = Field( - default="aci", description="The usage of additional data" - ) - admin_status: Optional[Literal["inService", "outOfService"]] = Field( - default=None, alias="adminStatus", description="Admin status" - ) + usage: Optional[str] = Field(default="aci", description="The usage of additional data") + admin_status: Optional[Literal["inService", "outOfService"]] = Field(default=None, alias="adminStatus", description="Admin status") health_score: Optional[int] = Field( default=None, alias="healthScore", @@ -223,26 
+177,16 @@ class AdditionalAciSwitchData(NDNestedModel): alias="lastSoftwareUpdateTime", description="Timestamp when the software is last updated", ) - node_id: Optional[int] = Field( - default=None, alias="nodeId", ge=1, description="Node ID" - ) - node_status: Optional[Literal["active", "inActive"]] = Field( - default=None, alias="nodeStatus", description="Node status" - ) - pod_id: Optional[int] = Field( - default=None, alias="podId", ge=1, description="Pod ID" - ) - remote_leaf_group_name: Optional[str] = Field( - default=None, alias="remoteLeafGroupName", description="Remote leaf group name" - ) + node_id: Optional[int] = Field(default=None, alias="nodeId", ge=1, description="Node ID") + node_status: Optional[Literal["active", "inActive"]] = Field(default=None, alias="nodeStatus", description="Node status") + pod_id: Optional[int] = Field(default=None, alias="podId", ge=1, description="Pod ID") + remote_leaf_group_name: Optional[str] = Field(default=None, alias="remoteLeafGroupName", description="Remote leaf group name") switch_added: Optional[str] = Field( default=None, alias="switchAdded", description="Timestamp when the switch is added", ) - tep_pool: Optional[str] = Field( - default=None, alias="tepPool", description="TEP IP pool" - ) + tep_pool: Optional[str] = Field(default=None, alias="tepPool", description="TEP IP pool") class Metadata(NDNestedModel): @@ -252,9 +196,7 @@ class Metadata(NDNestedModel): identifiers: ClassVar[List[str]] = [] - counts: Optional[Dict[str, int]] = Field( - default=None, description="Count information including total and remaining" - ) + counts: Optional[Dict[str, int]] = Field(default=None, description="Count information including total and remaining") class SwitchDataModel(NDBaseModel): @@ -265,9 +207,7 @@ class SwitchDataModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["switch_id"] - identifier_strategy: ClassVar[ - Optional[Literal["single", "composite", "hierarchical", "singleton"]] - ] = "single" + 
identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" switch_id: str = Field( ..., alias="switchId", @@ -278,10 +218,8 @@ class SwitchDataModel(NDBaseModel): alias="serialNumber", description="Serial number of switch or APIC controller node", ) - additional_data: Optional[Union[AdditionalSwitchData, AdditionalAciSwitchData]] = ( - Field( - default=None, alias="additionalData", description="Additional switch data" - ) + additional_data: Optional[Union[AdditionalSwitchData, AdditionalAciSwitchData]] = Field( + default=None, alias="additionalData", description="Additional switch data" ) advisory_level: Optional[AdvisoryLevel] = Field(default=None, alias="advisoryLevel") anomaly_level: Optional[AnomalyLevel] = Field(default=None, alias="anomalyLevel") @@ -291,34 +229,24 @@ class SwitchDataModel(NDBaseModel): alias="fabricManagementIp", description="Switch IPv4/v6 address used for management", ) - fabric_name: Optional[str] = Field( - default=None, alias="fabricName", description="Fabric name", max_length=64 - ) - fabric_type: Optional[str] = Field( - default=None, alias="fabricType", description="Fabric type" - ) + fabric_name: Optional[str] = Field(default=None, alias="fabricName", description="Fabric name", max_length=64) + fabric_type: Optional[str] = Field(default=None, alias="fabricType", description="Fabric type") hostname: Optional[str] = Field(default=None, description="Switch host name") - model: Optional[str] = Field( - default=None, description="Model of switch or APIC controller node" - ) + model: Optional[str] = Field(default=None, description="Model of switch or APIC controller node") software_version: Optional[str] = Field( default=None, alias="softwareVersion", description="Software version of switch or APIC controller node", ) switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") - system_up_time: Optional[str] = Field( - default=None, alias="systemUpTime", 
description="System up time" - ) + system_up_time: Optional[str] = Field(default=None, alias="systemUpTime", description="System up time") vpc_configured: Optional[bool] = Field( default=None, alias="vpcConfigured", description="Flag to indicate switch is part of a vPC domain", ) vpc_data: Optional[VpcData] = Field(default=None, alias="vpcData") - telemetry_ip_collection: Optional[TelemetryIpCollection] = Field( - default=None, alias="telemetryIpCollection" - ) + telemetry_ip_collection: Optional[TelemetryIpCollection] = Field(default=None, alias="telemetryIpCollection") @field_validator("additional_data", mode="before") @classmethod diff --git a/plugins/module_utils/models/manage_switches/validators.py b/plugins/module_utils/models/manage_switches/validators.py index 3dcdd3a6..626b4865 100644 --- a/plugins/module_utils/models/manage_switches/validators.py +++ b/plugins/module_utils/models/manage_switches/validators.py @@ -60,9 +60,7 @@ def validate_serial_number(v: Optional[str]) -> Optional[str]: return None # Serial numbers are typically alphanumeric with optional hyphens if not re.match(r"^[A-Za-z0-9_-]+$", v): - raise ValueError( - f"Serial number must be alphanumeric with optional hyphens/underscores: {v}" - ) + raise ValueError(f"Serial number must be alphanumeric with optional hyphens/underscores: {v}") return v @staticmethod @@ -78,10 +76,7 @@ def validate_hostname(v: Optional[str]) -> Optional[str]: raise ValueError("Hostname cannot exceed 255 characters") # Allow alphanumeric, dots, hyphens, underscores if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9._-]*$", v): - raise ValueError( - f"Invalid hostname format. Must start with alphanumeric and " - f"contain only alphanumeric, dots, hyphens, underscores: {v}" - ) + raise ValueError(f"Invalid hostname format. Must start with alphanumeric and " f"contain only alphanumeric, dots, hyphens, underscores: {v}") if v.startswith(".") or v.endswith(".") or ".." 
in v: raise ValueError(f"Invalid hostname format (dots): {v}") return v diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 99e7ce5c..34ae5f38 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -483,9 +483,7 @@ def main(): sw_module.manage_state() # Exit with results - log.info( - "State management completed successfully. Changed: %s", results.changed - ) + log.info("State management completed successfully. Changed: %s", results.changed) sw_module.exit_json() except NDModuleError as error: diff --git a/tests/sanity/ignore-2.17.txt b/tests/sanity/ignore-2.17.txt new file mode 100644 index 00000000..c3ca4236 --- /dev/null +++ b/tests/sanity/ignore-2.17.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/sanity/ignore-2.18.txt b/tests/sanity/ignore-2.18.txt new file mode 100644 index 00000000..c3ca4236 --- /dev/null +++ b/tests/sanity/ignore-2.18.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/sanity/ignore-2.19.txt b/tests/sanity/ignore-2.19.txt new file mode 100644 index 00000000..c3ca4236 --- /dev/null +++ b/tests/sanity/ignore-2.19.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py index efd7c931..60267297 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py @@ -85,9 +85,7 @@ def test_endpoints_api_v1_manage_fabrics_00030(): - FabricConfigDeployEndpointParams.to_query_string() """ with does_not_raise(): - params = FabricConfigDeployEndpointParams( - force_show_run=True, incl_all_msd_switches=True - ) + params = FabricConfigDeployEndpointParams(force_show_run=True, 
incl_all_msd_switches=True) result = params.to_query_string() assert "forceShowRun=true" in result assert "inclAllMsdSwitches=true" in result @@ -201,10 +199,7 @@ def test_endpoints_api_v1_manage_fabrics_00130(): instance.fabric_name = "MyFabric" instance.endpoint_params.force_show_run = True result = instance.path - assert ( - result - == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" - ) + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" # ============================================================================= diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py index 49fa0b49..72802bfc 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py @@ -209,10 +209,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00230(): instance.fabric_name = "MyFabric" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert ( - result - == "/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345" - ) + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345" # ============================================================================= @@ -304,9 +301,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00330(): instance.endpoint_params.cluster_name = "cluster1" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert result.startswith( - "/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?" 
- ) + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?") assert "clusterName=cluster1" in result assert "ticketId=CHG12345" in result @@ -400,9 +395,7 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00430(): instance.endpoint_params.cluster_name = "cluster1" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert result.startswith( - "/api/v1/manage/fabrics/MyFabric/switchActions/preProvision?" - ) + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/preProvision?") assert "clusterName=cluster1" in result assert "ticketId=CHG12345" in result @@ -495,7 +488,4 @@ def test_endpoints_api_v1_manage_fabrics_switchactions_00730(): instance.fabric_name = "MyFabric" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert ( - result - == "/api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345" - ) + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py index 64e3dbd9..6ee60ef1 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py @@ -133,9 +133,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00050(): - FabricSwitchesAddEndpointParams.to_query_string() """ with does_not_raise(): - params = FabricSwitchesAddEndpointParams( - cluster_name="cluster1", ticket_id="CHG12345" - ) + params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") result = params.to_query_string() assert "clusterName=cluster1" in result assert "ticketId=CHG12345" in result @@ -475,10 +473,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00530(): instance.fabric_name = "MyFabric" 
instance.switch_sn = "SAL1948TRTT" result = instance.path - assert ( - result - == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA" - ) + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA" def test_endpoints_api_v1_manage_fabrics_switches_00540(): @@ -501,10 +496,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00540(): instance.switch_sn = "SAL1948TRTT" instance.endpoint_params.ticket_id = "CHG12345" result = instance.path - assert ( - result - == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345" - ) + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345" # ============================================================================= @@ -595,10 +587,7 @@ def test_endpoints_api_v1_manage_fabrics_switches_00630(): instance.fabric_name = "MyFabric" instance.switch_sn = "SAL1948TRTT" result = instance.path - assert ( - result - == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber" - ) + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber" def test_endpoints_api_v1_manage_fabrics_switches_00640(): @@ -621,7 +610,4 @@ def test_endpoints_api_v1_manage_fabrics_switches_00640(): instance.switch_sn = "SAL1948TRTT" instance.endpoint_params.cluster_name = "cluster1" result = instance.path - assert ( - result - == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1" - ) + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1" From 3c54a5ca0cd8a41461d30216d775288c1bfd8a25 Mon Sep 17 00:00:00 2001 From: samitab Date: Tue, 17 Mar 2026 23:03:19 +1000 Subject: [PATCH 083/109] [ignore] Enable CI unit tests --- .github/workflows/ansible-test.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) 
diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index 21cbabf7..8a91e417 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -158,7 +158,6 @@ jobs: units: name: Units in ubuntu-latest - if: false # No unit tests yet needs: - importer runs-on: ubuntu-latest @@ -202,7 +201,7 @@ jobs: integration: name: Integration in ubuntu-latest needs: - # - units + - units - sanity runs-on: ubuntu-latest strategy: From 0af63e6c8286f239308f8c30530efa29a800f688 Mon Sep 17 00:00:00 2001 From: samitab Date: Mon, 23 Mar 2026 18:39:15 +1000 Subject: [PATCH 084/109] [ignore] Add pydantic to requirements.txt --- requirements.txt | 2 +- tests/integration/network-integration.requirements.txt | 2 +- tests/unit/requirements.txt | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 tests/unit/requirements.txt diff --git a/requirements.txt b/requirements.txt index 98907e9a..cc6b2c4b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ requests_toolbelt jsonpath-ng lxml -pydantic==2.12.5 \ No newline at end of file +pydantic==2.12.5 diff --git a/tests/integration/network-integration.requirements.txt b/tests/integration/network-integration.requirements.txt index 98907e9a..cc6b2c4b 100644 --- a/tests/integration/network-integration.requirements.txt +++ b/tests/integration/network-integration.requirements.txt @@ -1,4 +1,4 @@ requests_toolbelt jsonpath-ng lxml -pydantic==2.12.5 \ No newline at end of file +pydantic==2.12.5 diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt new file mode 100644 index 00000000..98907e9a --- /dev/null +++ b/tests/unit/requirements.txt @@ -0,0 +1,4 @@ +requests_toolbelt +jsonpath-ng +lxml +pydantic==2.12.5 \ No newline at end of file From 62d1069406da29c516f497dd4254ebe07a70238f Mon Sep 17 00:00:00 2001 From: AKDRG Date: Tue, 31 Mar 2026 22:20:45 +0530 Subject: [PATCH 085/109] Fix Overridden + Config Model Identifier + Docs --- 
.../manage_switches/nd_switch_resources.py | 14 ++++++-- .../manage_switches/bootstrap_models.py | 4 +-- .../models/manage_switches/config_models.py | 1 + .../manage_switches/switch_data_models.py | 1 + plugins/modules/nd_manage_switches.py | 36 ++++--------------- 5 files changed, 21 insertions(+), 35 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 6a30b47f..376eb086 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2376,9 +2376,14 @@ def manage_state(self) -> None: proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) if self.config else None return self._handle_deleted_state(proposed_config) - # merged / overridden — config is required - if not self.config: - self.nd.module.fail_json(msg=f"'config' is required for '{self.state}' state.") + # merged — config is required + if self.state == "merged" and not self.config: + self.nd.module.fail_json(msg="'config' is required for 'merged' state.") + + # overridden with no/empty config — desired state is zero switches, delete all + if self.state == "overridden" and not self.config: + self.log.info("Overridden state with no config — deleting all switches from fabric") + return self._handle_deleted_state(None) proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) # Partition configs by operation type @@ -2585,6 +2590,9 @@ def _handle_merged_state( if not switch_actions: self.log.info("No switch actions to process after add/migration collection") + if idempotent_save_req: + self.log.info("Config save and deploy required for out-of-sync idempotent switch(es)") + self.fabric_ops.finalize() return # Common post-processing for all switches (new + migration) diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py 
b/plugins/module_utils/models/manage_switches/bootstrap_models.py index 68931315..f413a9f6 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -141,8 +141,8 @@ class BootstrapImportSpecificModel(NDBaseModel): Switch-identifying fields returned by the bootstrap GET API prior to import. """ - identifiers: ClassVar[List[str]] = [] - identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + identifiers: ClassVar[List[str]] = ["serial_number"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" hostname: str = Field(..., description="Hostname of the bootstrap switch") ip: str = Field(..., description="IP address of the bootstrap switch") serial_number: str = Field(..., alias="serialNumber", description="Serial number of the bootstrap switch") diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index dc45c2d2..2e8daf61 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -303,6 +303,7 @@ class SwitchConfigModel(NDBaseModel): """ identifiers: ClassVar[List[str]] = ["seed_ip"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" # Fields excluded from diff — only seed_ip + role are compared exclude_from_diff: ClassVar[List[str]] = [ diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index 4d3db2c4..80dbe70a 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -208,6 +208,7 @@ class SwitchDataModel(NDBaseModel): identifiers: 
ClassVar[List[str]] = ["switch_id"] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + exclude_from_diff: ClassVar[set] = {"system_up_time", "anomaly_level", "advisory_level", "alert_suspend"} switch_id: str = Field( ..., alias="switchId", diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 34ae5f38..2663cb51 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -300,10 +300,12 @@ username: admin password: "{{ switch_password }}" role: leaf + preserve_config: false - seed_ip: 192.168.10.202 username: admin password: "{{ switch_password }}" role: spine + preserve_config: false state: merged - name: Preprovision a switch via POAP @@ -313,6 +315,7 @@ - seed_ip: 192.168.10.1 username: admin password: "{{ switch_password }}" + role: spine preprovision: serial_number: SAL1234ABCD model: N9K-C93180YC-EX @@ -331,6 +334,7 @@ - seed_ip: 192.168.10.1 username: admin password: "{{ switch_password }}" + role: leaf poap: serial_number: SAL5678EFGH hostname: leaf-bootstrap @@ -365,6 +369,7 @@ image_policy: my-image-policy ip: 192.168.10.50 gateway_ip: 192.168.10.1/24 + discovery_username: root discovery_password: "{{ discovery_password }}" state: merged @@ -384,35 +389,7 @@ """ -RETURN = """ -previous: - description: The configuration prior to the module execution. - returned: when state is not gathered - type: list - elements: dict -proposed: - description: The proposed configuration sent to the API. - returned: when state is not gathered - type: list - elements: dict -sent: - description: The configuration sent to the API. - returned: when state is not gathered - type: list - elements: dict -current: - description: The current configuration after module execution. - returned: when state is not gathered - type: list - elements: dict -gathered: - description: - - The current fabric switch inventory returned in config format. 
- - Each entry mirrors the C(config) input schema (seed_ip, role, - auth_proto, preserve_config). Credentials are replaced with placeholders. - returned: when state is gathered - type: list - elements: dict +RETURN = r""" """ import logging @@ -449,7 +426,6 @@ def main(): supports_check_mode=True, required_if=[ ("state", "merged", ["config"]), - ("state", "overridden", ["config"]), ], ) From 03eba1bdae6e7dbfaad35868123610335954b1ca Mon Sep 17 00:00:00 2001 From: AKDRG Date: Wed, 1 Apr 2026 15:28:10 +0530 Subject: [PATCH 086/109] Complete Overhaul of Switch Resources to Support POAP/Preprovision Overridden State --- .../manage_switches/nd_switch_resources.py | 1008 ++++++++++------- plugins/modules/nd_manage_switches.py | 10 +- 2 files changed, 584 insertions(+), 434 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 376eb086..d75bd37f 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -132,6 +132,58 @@ class SwitchServiceContext: # ========================================================================= +@dataclass +class SwitchPlan: + """Unified action plan produced by :meth:`SwitchDiffEngine.compute_changes`. + + All lists contain :class:`SwitchConfigModel` objects so that every state + handler receives the original user config (credentials, role, etc.) and can + act on it directly. Existing inventory entries are kept alongside only + where removal requires a serial number. + + Attributes: + to_add: New normal switches that need ``bulk_add``. + to_update: Normal switches already in fabric but with field + differences — remove-and-re-add (overridden only). + to_delete: Switches in fabric that have no corresponding config + entry (overridden / deleted states). 
+ migration_mode: Normal switches currently in migration mode — no add + needed, but role update and finalize are applied. + idempotent: Normal switches that match desired state exactly. + to_bootstrap: POAP bootstrap configs that need the import-bootstrap + API call (switch not in fabric, or mismatch + unreachable). + normal_readd: POAP/preprovision configs whose switch *is* reachable + and can be re-added via the normal bulk_add path. + to_preprovision: Pre-provision configs that need the preProvision API call. + to_swap: Serial-swap configs (poap + preprovision both present). + to_rma: RMA configs. + poap_ips: Seed IPs of all POAP/preprovision/swap configs — used by + overridden to skip these IPs during the cleanup sweep. + to_delete_existing: Existing ``SwitchDataModel`` records for switches that + must be deleted before re-add (POAP/preprovision mismatches + and overridden normal updates). Kept parallel to the + config-level lists above. + """ + + # Normal-switch diff buckets (config side) + to_add: List["SwitchConfigModel"] + to_update: List["SwitchConfigModel"] + to_delete: List["SwitchDataModel"] + migration_mode: List["SwitchConfigModel"] + idempotent: List["SwitchConfigModel"] + + # POAP/preprovision/swap/RMA buckets + to_bootstrap: List["SwitchConfigModel"] + normal_readd: List["SwitchConfigModel"] + to_preprovision: List["SwitchConfigModel"] + to_swap: List["SwitchConfigModel"] + to_rma: List["SwitchConfigModel"] + + # Cross-cutting helpers + poap_ips: set + to_delete_existing: List["SwitchDataModel"] + + class SwitchDiffEngine: """Provide stateless validation and diff computation helpers.""" @@ -216,42 +268,60 @@ def validate_configs( @staticmethod def compute_changes( - proposed: List[SwitchDataModel], + proposed_configs: List[SwitchConfigModel], existing: List[SwitchDataModel], log: logging.Logger, - ) -> Dict[str, List[SwitchDataModel]]: - """Compare proposed and existing switches and categorize changes. 
+ ) -> "SwitchPlan": + """Classify all proposed configs against the current fabric inventory. + + Accepts the full mix of normal, POAP/preprovision, swap, and RMA configs + and produces a unified :class:`SwitchPlan` that each state handler can + act on directly. This is the single idempotency gate for all operation + types. + + Idempotency rules by operation type: + + * **normal** — compare ``seed_ip``, ``serial_number`` (via discovery), + ``hostname``, ``model``, ``software_version``, and ``role`` against + the existing inventory. + * **poap / preprovision** — compare ``seed_ip``, ``serial_number`` + (from ``poap.serial_number`` / ``preprovision.serial_number``), and + ``role`` against the existing inventory. If all three match the + switch is idempotent and skipped. On a mismatch the routing depends + on ``discovery_status``: + + - Bootstrap mismatch, ``discovery_status == OK`` → ``normal_readd`` + - Bootstrap mismatch, anything else → ``to_bootstrap`` + - Preprovision mismatch, ``discovery_status == UNREACHABLE`` → ``to_preprovision`` + - Preprovision mismatch, anything else → ``normal_readd`` + + * **swap** — always active (no idempotency check; the caller validates + preconditions). + * **rma** — always active (no idempotency check; caller validates). Args: - proposed: Switch models representing desired state. - existing: Switch models currently present in inventory. + proposed_configs: All validated switch configs for this run. + existing: Current fabric inventory snapshot. log: Logger instance. Returns: - Dict mapping change buckets to switch lists. Buckets are - ``to_add``, ``to_update``, ``to_delete``, ``migration_mode``, - and ``idempotent``. + :class:`SwitchPlan` with all buckets populated. 
""" log.debug("ENTER: compute_changes()") - log.debug( - "Comparing %s proposed vs %s existing switches", - len(proposed), + log.info( + "compute_changes: %s proposed config(s) vs %s existing switch(es)", + len(proposed_configs), len(existing), ) - # Build indexes for O(1) lookups - existing_by_id = {sw.switch_id: sw for sw in existing} - existing_by_ip = {sw.fabric_management_ip: sw for sw in existing} - - log.debug( - "Indexes built — existing_by_id: %s, existing_by_ip: %s", - list(existing_by_id.keys()), - list(existing_by_ip.keys()), - ) + existing_by_ip: Dict[str, SwitchDataModel] = { + sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip + } + existing_by_id: Dict[str, SwitchDataModel] = { + sw.switch_id: sw for sw in existing if sw.switch_id + } - # Only user-controllable fields populated by both discovery and - # inventory APIs. Server-managed fields (uptime, alerts, vpc info, - # telemetry, etc.) are ignored. + # Fields compared for normal switches compare_fields = { "switch_id", "serial_number", @@ -262,95 +332,196 @@ def compute_changes( "switch_role", } - changes: Dict[str, list] = { - "to_add": [], - "to_update": [], - "to_delete": [], - "migration_mode": [], - "idempotent": [], - } + # Output buckets + to_add: List[SwitchConfigModel] = [] + to_update: List[SwitchConfigModel] = [] + to_delete_existing: List[SwitchDataModel] = [] + migration_mode: List[SwitchConfigModel] = [] + idempotent: List[SwitchConfigModel] = [] + to_bootstrap: List[SwitchConfigModel] = [] + normal_readd: List[SwitchConfigModel] = [] + to_preprovision: List[SwitchConfigModel] = [] + to_swap: List[SwitchConfigModel] = [] + to_rma: List[SwitchConfigModel] = [] + poap_ips: set = set() + + # Track which existing switch IDs are accounted for by a config + accounted_ids: set = set() + + for cfg in proposed_configs: + op = cfg.operation_type + + # ------------------------------------------------------------------ + # RMA — no idempotency check; always active + # 
------------------------------------------------------------------ + if op == "rma": + to_rma.append(cfg) + continue - # Categorise proposed switches - for prop_sw in proposed: - ip = prop_sw.fabric_management_ip - sid = prop_sw.switch_id + existing_sw = existing_by_ip.get(cfg.seed_ip) + if existing_sw: + accounted_ids.add(existing_sw.switch_id) + + # ------------------------------------------------------------------ + # POAP swap — both poap and preprovision blocks present + # ------------------------------------------------------------------ + if op == "swap": + poap_ips.add(cfg.seed_ip) + to_swap.append(cfg) + continue - existing_sw = existing_by_id.get(sid) - match_key = "switch_id" if existing_sw else None + # ------------------------------------------------------------------ + # POAP bootstrap + # ------------------------------------------------------------------ + if op == "poap": + poap_ips.add(cfg.seed_ip) + serial = cfg.poap.serial_number if cfg.poap else None - if not existing_sw: - existing_sw = existing_by_ip.get(ip) - if existing_sw: - match_key = "ip" + if not existing_sw: + log.info("Bootstrap %s: not in fabric — queue for bootstrap", cfg.seed_ip) + to_bootstrap.append(cfg) + continue - if not existing_sw: - log.info("Switch %s (id=%s) not found in existing — marking to_add", ip, sid) - changes["to_add"].append(prop_sw) + serial_match = serial and serial in (existing_sw.serial_number, existing_sw.switch_id) + role_match = cfg.role is None or cfg.role == existing_sw.switch_role + if serial_match and role_match: + log.info( + "Bootstrap %s serial=%s role=%s — idempotent, skipping", + cfg.seed_ip, serial, cfg.role, + ) + idempotent.append(cfg) + continue + + status = existing_sw.additional_data.discovery_status if existing_sw.additional_data else None + log.info( + "Bootstrap %s differs (serial_match=%s, role_match=%s, status=%s) — deleting existing", + cfg.seed_ip, serial_match, role_match, + getattr(status, "value", status) if status else "unknown", 
+ ) + to_delete_existing.append(existing_sw) + if status == DiscoveryStatus.OK: + log.info("Bootstrap %s: switch reachable — routing to normal_readd", cfg.seed_ip) + normal_readd.append(cfg) + else: + log.info("Bootstrap %s: switch unreachable — routing to bootstrap workflow", cfg.seed_ip) + to_bootstrap.append(cfg) continue - log.debug( - "Switch %s (id=%s) found in existing with %s match %s", - ip, - sid, - match_key, - existing_sw, - ) - log.debug( - "Switch %s matched existing by %s (existing_id=%s)", - ip, - match_key, - existing_sw.switch_id, - ) + # ------------------------------------------------------------------ + # Pre-provision + # ------------------------------------------------------------------ + if op == "preprovision": + poap_ips.add(cfg.seed_ip) + serial = cfg.preprovision.serial_number if cfg.preprovision else None - if existing_sw.additional_data.system_mode == SystemMode.MIGRATION: - log.info("Switch %s (%s) is in Migration mode", ip, existing_sw.switch_id) - changes["migration_mode"].append(prop_sw) - continue + if not existing_sw: + log.info("Preprovision %s: not in fabric — queue for preprovision", cfg.seed_ip) + to_preprovision.append(cfg) + continue - prop_dict = prop_sw.model_dump(by_alias=False, exclude_none=True, include=compare_fields) - existing_dict = existing_sw.model_dump(by_alias=False, exclude_none=True, include=compare_fields) + serial_match = serial and serial in (existing_sw.serial_number, existing_sw.switch_id) + role_match = cfg.role is None or cfg.role == existing_sw.switch_role + if serial_match and role_match: + log.info( + "Preprovision %s serial=%s role=%s — idempotent, skipping", + cfg.seed_ip, serial, cfg.role, + ) + idempotent.append(cfg) + continue - if prop_dict == existing_dict: - log.debug("Switch %s is idempotent — no changes needed", ip) - changes["idempotent"].append(prop_sw) - else: - diff_keys = {k for k in set(prop_dict) | set(existing_dict) if prop_dict.get(k) != existing_dict.get(k)} + status = 
existing_sw.additional_data.discovery_status if existing_sw.additional_data else None log.info( - "Switch %s has differences — marking to_update. Changed fields: %s", - ip, - diff_keys, - ) - proposed_diff = {k: prop_dict.get(k) for k in diff_keys} - existing_diff = {k: existing_dict.get(k) for k in diff_keys} - log.debug( - "Switch %s diff detail — proposed: %s, existing: %s", - ip, - proposed_diff, - existing_diff, + "Preprovision %s differs (serial_match=%s, role_match=%s, status=%s) — deleting existing", + cfg.seed_ip, serial_match, role_match, + getattr(status, "value", status) if status else "unknown", ) - changes["to_update"].append(prop_sw) + to_delete_existing.append(existing_sw) + if status == DiscoveryStatus.UNREACHABLE: + log.info("Preprovision %s: switch unreachable — routing to preprovision workflow", cfg.seed_ip) + to_preprovision.append(cfg) + else: + log.info("Preprovision %s: switch reachable — routing to normal_readd", cfg.seed_ip) + normal_readd.append(cfg) + continue + + # ------------------------------------------------------------------ + # Normal switch + # ------------------------------------------------------------------ + # Note: serial/id comparison happens after discovery via build_proposed; + # here we rely on the SwitchDataModel that build_proposed will produce + # being present in existing. Since this function receives SwitchConfigModel + # objects (not yet resolved to SwitchDataModel), normal-switch idempotency + # is done after discover() + build_proposed() by comparing the resulting + # SwitchDataModel against existing using compare_fields. + # + # The code below handles the case where the switch is *already* in the + # fabric (no discovery needed) and can be evaluated immediately. 
+            if op == "normal":
+                if not existing_sw:
+                    log.info("Normal %s: not in fabric — queue for discovery + add", cfg.seed_ip)
+                    to_add.append(cfg)
+                    continue
+
+                if existing_sw.additional_data and existing_sw.additional_data.system_mode == SystemMode.MIGRATION:
+                    log.info("Normal %s (%s): in migration mode", cfg.seed_ip, existing_sw.switch_id)
+                    migration_mode.append(cfg)
+                    continue

-        # Switches in existing but not in proposed (for overridden state)
-        proposed_ids = {sw.switch_id for sw in proposed}
-        for existing_sw in existing:
-            if existing_sw.switch_id not in proposed_ids:
+                # Only role can be evaluated without discovery data; hostname,
+                # model, and software version require discovery (no dict is built here).
+                role_match = cfg.role is None or cfg.role == existing_sw.switch_role
+                # IP always matches (looked up by IP), so only role matters
+                # for an already-in-fabric switch; other fields (model, version,
+                # hostname) are only verifiable after discovery.
+                if role_match:
+                    log.info("Normal %s: in fabric, role matches — checking field diff after build_proposed", cfg.seed_ip)
+                    # Defer the final field-level diff to after build_proposed by
+                    # treating this config as to_add ("needs evaluation").
+                    # NOTE(review): the caller runs a single pass and bulk-adds
+                    # everything in to_add — confirm in-fabric switches are not re-added.
+ to_add.append(cfg) + else: + log.info( + "Normal %s: role mismatch (config=%s, existing=%s) — marking to_update", + cfg.seed_ip, cfg.role, existing_sw.switch_role, + ) + to_update.append(cfg) + continue + + # Switches in fabric that no config entry accounts for + # (only meaningful for overridden / deleted states) + to_delete: List[SwitchDataModel] = [] + for sw in existing: + if sw.switch_id and sw.switch_id not in accounted_ids and sw.fabric_management_ip not in poap_ips: log.info( - "Existing switch %s (%s) not in proposed — marking to_delete", - existing_sw.fabric_management_ip, - existing_sw.switch_id, + "Existing %s (%s) has no config entry — marking to_delete", + sw.fabric_management_ip, sw.switch_id, ) - changes["to_delete"].append(existing_sw) - + to_delete.append(sw) + + plan = SwitchPlan( + to_add=to_add, + to_update=to_update, + to_delete=to_delete, + migration_mode=migration_mode, + idempotent=idempotent, + to_bootstrap=to_bootstrap, + normal_readd=normal_readd, + to_preprovision=to_preprovision, + to_swap=to_swap, + to_rma=to_rma, + poap_ips=poap_ips, + to_delete_existing=to_delete_existing, + ) log.info( - "Compute changes summary: to_add=%s, to_update=%s, to_delete=%s, migration_mode=%s, idempotent=%s", - len(changes["to_add"]), - len(changes["to_update"]), - len(changes["to_delete"]), - len(changes["migration_mode"]), - len(changes["idempotent"]), + "compute_changes: to_add=%s, to_update=%s, to_delete=%s, migration=%s, " + "idempotent=%s, bootstrap=%s, normal_readd=%s, preprov=%s, swap=%s, rma=%s", + len(plan.to_add), len(plan.to_update), len(plan.to_delete), len(plan.migration_mode), + len(plan.idempotent), len(plan.to_bootstrap), len(plan.normal_readd), + len(plan.to_preprovision), len(plan.to_swap), len(plan.to_rma), ) log.debug("EXIT: compute_changes()") - return changes + return plan @staticmethod def validate_switch_api_fields( @@ -2356,9 +2527,16 @@ def exit_json(self) -> None: def manage_state(self) -> None: """Dispatch the requested 
module state to the appropriate workflow. - This method validates input, routes POAP and RMA operations to dedicated - handlers, and executes state-specific orchestration for merged, - overridden, and deleted operations. + Unified entry point for all states. The flow is: + + 1. Validate and route simple states (gathered, deleted). + 2. Validate the full config, enforce state constraints. + 3. Call ``compute_changes`` with **all** configs in one pass — this + classifies normal, POAP/preprovision, swap, and RMA configs against + the current fabric inventory and handles idempotency. + 4. Discover all switches that need it in **one combined call**. + 5. Delegate to the appropriate state handler with the populated plan + and the single ``discovered_data`` dict. Returns: None. @@ -2371,76 +2549,73 @@ def manage_state(self) -> None: self.nd.module.fail_json(msg="'config' must not be provided for 'gathered' state.") return self._handle_gathered_state() - # deleted — config is optional + # deleted — config is optional; handled separately (lighter path) if self.state == "deleted": proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) if self.config else None return self._handle_deleted_state(proposed_config) - # merged — config is required + # merged — config required if self.state == "merged" and not self.config: self.nd.module.fail_json(msg="'config' is required for 'merged' state.") - # overridden with no/empty config — desired state is zero switches, delete all + # overridden with no/empty config — delete everything if self.state == "overridden" and not self.config: self.log.info("Overridden state with no config — deleting all switches from fabric") return self._handle_deleted_state(None) + # --- Validate & classify ------------------------------------------------ proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) - # Partition configs by operation type - poap_configs = [c for c in 
proposed_config if c.operation_type in ("poap", "preprovision", "swap")] + + # Enforce state constraints rma_configs = [c for c in proposed_config if c.operation_type == "rma"] - normal_configs = [c for c in proposed_config if c.operation_type == "normal"] + poap_configs = [c for c in proposed_config if c.operation_type in ("poap", "preprovision", "swap")] + if rma_configs and self.state != "merged": + self.nd.module.fail_json(msg="RMA configs are only supported with state=merged") + if poap_configs and self.state not in ("merged", "overridden"): + self.nd.module.fail_json(msg="POAP and pre-provision configs require state=merged or state=overridden") + # Capture all proposed configs for NDOutput output_proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchConfigModel) for cfg in proposed_config: output_proposed.add(cfg) self.output.assign(proposed=output_proposed) - self.log.info( - "Config partition: %s normal, %s poap, %s rma", - len(normal_configs), - len(poap_configs), - len(rma_configs), - ) - - # POAP and RMA are only valid with state=merged - if (poap_configs or rma_configs) and self.state != "merged": - self.nd.module.fail_json(msg="POAP and RMA configs are only supported with state=merged") + # Classify all configs in one pass — idempotency included + plan = SwitchDiffEngine.compute_changes(proposed_config, list(self.existing), self.log) + + # --- Single combined discovery pass ------------------------------------- + # Discover every switch that is not yet in the fabric: + # • plan.to_add — normal switches not in inventory + # • plan.normal_readd — POAP/preprov mismatches that are reachable + # Switches already in the fabric (to_update, migration_mode) are + # skipped here; overridden will re-discover them after deletion. 
+ configs_to_discover = plan.to_add + plan.normal_readd + if configs_to_discover: + self.log.info( + "Discovering %s switch(es): %s normal-add, %s poap-readd", + len(configs_to_discover), + len(plan.to_add), + len(plan.normal_readd), + ) + discovered_data = self.discovery.discover(configs_to_discover) + else: + self.log.info("No switches need discovery in this run") + discovered_data = {} - # Normal discovery runs first so the fabric inventory is up to date - # before POAP/RMA handlers execute. + # Build proposed SwitchDataModel collection for normal switches only + # (needed for the self.proposed reference used in check-mode reporting) + normal_configs = [c for c in proposed_config if c.operation_type == "normal"] if normal_configs: - existing_ips = {sw.fabric_management_ip for sw in self.existing} - configs_to_discover = [cfg for cfg in normal_configs if cfg.seed_ip not in existing_ips] - if configs_to_discover: - self.log.info( - "Discovery needed for %s/%s switch(es) — %s already in fabric", - len(configs_to_discover), - len(normal_configs), - len(normal_configs) - len(configs_to_discover), - ) - discovered_data = self.discovery.discover(configs_to_discover) - else: - self.log.info("All proposed switches already in fabric — skipping discovery") - discovered_data = {} built = self.discovery.build_proposed(normal_configs, discovered_data, list(self.existing)) self.proposed = NDConfigCollection(model_class=SwitchDataModel, items=built) - diff = SwitchDiffEngine.compute_changes(list(self.proposed), list(self.existing), self.log) - state_handlers = { - "merged": self._handle_merged_state, - "overridden": self._handle_overridden_state, - } - handler = state_handlers.get(self.state) - if handler is None: - self.nd.module.fail_json(msg=f"Unsupported state: {self.state}") - handler(diff, normal_configs, discovered_data) - - # POAP and RMA run after normal discovery - if poap_configs: - self.poap_handler.handle(poap_configs, list(self.existing)) - if rma_configs: - 
self.rma_handler.handle(rma_configs, list(self.existing)) + # --- Dispatch ----------------------------------------------------------- + if self.state == "merged": + self._handle_merged_state(plan, discovered_data) + elif self.state == "overridden": + self._handle_overridden_state(plan, discovered_data) + else: + self.nd.module.fail_json(msg=f"Unsupported state: {self.state}") # ===================================================================== # State Handlers (orchestration only — delegate to services) @@ -2448,250 +2623,178 @@ def manage_state(self) -> None: def _handle_merged_state( self, - diff: Dict[str, List[SwitchDataModel]], - proposed_config: List[SwitchConfigModel], - discovered_data: Optional[Dict[str, Dict[str, Any]]] = None, + plan: "SwitchPlan", + discovered_data: Dict[str, Dict[str, Any]], ) -> None: - """Handle merged-state add and migration workflows. + """Handle merged-state workflows for all operation types. + + Processes normal adds, migration-mode switches, POAP bootstrap, + pre-provision, swap, normal re-adds, and RMA in a single pass. + Normal switches that require field-level updates fail fast; use + ``overridden`` state for in-place updates. Args: - diff: Categorized switch diff output. - proposed_config: Validated switch config list. - discovered_data: Optional discovery data by seed IP. + plan: Unified action plan from :meth:`SwitchDiffEngine.compute_changes`. + discovered_data: Discovery data keyed by seed IP for all switches + that required discovery this run. Returns: None. 
""" self.log.debug("ENTER: _handle_merged_state()") self.log.info("Handling merged state") - self.log.debug("Proposed configs: %s", len(self.proposed)) - self.log.debug("Existing switches: %s", len(self.existing)) - if not self.proposed: - self.log.info("No configurations provided for merged state") - self.log.debug("EXIT: _handle_merged_state() - no configs") - return + # Fail if any normal switches need field-level updates + if plan.to_update: + ips = [cfg.seed_ip for cfg in plan.to_update] + self.nd.module.fail_json( + msg=( + f"Switches require updates not supported in merged state. " + f"Use 'overridden' state for in-place updates. " + f"Affected switches: {ips}" + ) + ) - config_by_ip = {sw.seed_ip: sw for sw in proposed_config} + # Check whether any idempotent switch (normal or POAP) is out of + # config-sync and needs a deploy without a re-add. + # Pre-provisioned switches are placeholder entries that are never + # in-sync by design, so they are excluded from this check. Only relevant when deploy is enabled. 
existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} + idempotent_save_req = False + if self.ctx.deploy_config: + for cfg in plan.idempotent: + if cfg.operation_type == "preprovision": + continue + sw = existing_by_ip.get(cfg.seed_ip) + status = sw.additional_data.config_sync_status if sw and sw.additional_data else None + if status != ConfigSyncStatus.IN_SYNC: + self.log.info( + "Switch %s is idempotent but configSyncStatus='%s' — will finalize", + cfg.seed_ip, + getattr(status, "value", status) if status else "unknown", + ) + idempotent_save_req = True + break - # Phase 1: Handle idempotent switches that may need config sync - idempotent_save_req = self._merged_handle_idempotent(diff, existing_by_ip) - - # Phase 2: Fail on to_update (merged state doesn't support updates) - self._merged_handle_to_update(diff) - - switches_to_add = diff.get("to_add", []) - migration_switches = diff.get("migration_mode", []) - - if not switches_to_add and not migration_switches and not idempotent_save_req: - self.log.info("No switches need adding or migration processing") + has_work = bool( + plan.to_add or plan.migration_mode or plan.to_bootstrap + or plan.normal_readd or plan.to_preprovision or plan.to_swap + or plan.to_rma or idempotent_save_req + ) + if not has_work: + self.log.info("merged: nothing to do — all switches idempotent") return - # Check mode — preview only + # Check mode if self.nd.module.check_mode: self.log.info( - "Check mode: would add %s, process %s migration switch(es), save_deploy_required=%s", - len(switches_to_add), - len(migration_switches), - idempotent_save_req, + "Check mode: add=%s, migrate=%s, bootstrap=%s, " + "readd=%s, preprov=%s, swap=%s, rma=%s, save_deploy=%s", + len(plan.to_add), len(plan.migration_mode), len(plan.to_bootstrap), + len(plan.normal_readd), len(plan.to_preprovision), len(plan.to_swap), + len(plan.to_rma), idempotent_save_req, ) self.results.action = "merge" self.results.state = self.state 
self.results.operation_type = OperationType.CREATE - self.results.response_current = { - "MESSAGE": "check mode — skipped", - "RETURN_CODE": 200, - } + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { - "to_add": [sw.fabric_management_ip for sw in switches_to_add], - "migration_mode": [sw.fabric_management_ip for sw in migration_switches], + "to_add": [c.seed_ip for c in plan.to_add], + "migration_mode": [c.seed_ip for c in plan.migration_mode], + "bootstrap": [c.seed_ip for c in plan.to_bootstrap], + "normal_readd": [c.seed_ip for c in plan.normal_readd], + "preprovision": [c.seed_ip for c in plan.to_preprovision], + "swap": [c.seed_ip for c in plan.to_swap], + "rma": [c.seed_ip for c in plan.to_rma], "save_deploy_required": idempotent_save_req, } self.results.register_api_call() return - # Collect (serial_number, SwitchConfigModel) pairs for post-processing + # --- Normal + normal_readd bulk_add (one combined pass) ----------------- + add_configs = plan.to_add + plan.normal_readd switch_actions: List[Tuple[str, SwitchConfigModel]] = [] - _bulk_added_ips: set = set() - - # Phase 4: Bulk add new switches to fabric - if switches_to_add and discovered_data: - add_configs = [] - for sw in switches_to_add: - cfg = config_by_ip.get(sw.fabric_management_ip) - if cfg: - add_configs.append(cfg) - else: + have_migration = bool(plan.migration_mode) + + if add_configs and discovered_data: + credential_groups = group_switches_by_credentials(add_configs, self.log) + for group_key, group_switches in credential_groups.items(): + username, _pw_hash, auth_proto, platform_type, preserve_config = group_key + password = group_switches[0].password + pairs = [ + (cfg, discovered_data[cfg.seed_ip]) + for cfg in group_switches + if cfg.seed_ip in discovered_data + ] + if not pairs: self.log.warning( - "No config found for switch %s, skipping add", - 
sw.fabric_management_ip, + "No discovery data for group %s — skipping bulk_add", + [cfg.seed_ip for cfg in group_switches], ) - - if add_configs: - credential_groups = group_switches_by_credentials(add_configs, self.log) - for group_key, group_switches in credential_groups.items(): - ( - username, - password_hash, - auth_proto, - platform_type, - preserve_config, - ) = group_key - password = group_switches[0].password - - pairs = [] - for cfg in group_switches: - disc = discovered_data.get(cfg.seed_ip) - if disc: - pairs.append((cfg, disc)) - else: - self.log.warning("No discovery data for %s, skipping", cfg.seed_ip) - - if not pairs: - continue - - self.fabric_ops.bulk_add( - switches=pairs, - username=username, - password=password, - auth_proto=auth_proto, - platform_type=platform_type, - preserve_config=preserve_config, - ) - _bulk_added_ips.update(cfg.seed_ip for cfg, _disc in pairs) - - for cfg, disc in pairs: - sn = disc.get("serialNumber") - if sn: - switch_actions.append((sn, cfg)) - self._log_operation("add", cfg.seed_ip) - - # Phase 5: Collect migration switches for post-processing - # Migration mode switches get role updates during post-add processing. 
- # Track newly added switches in self.sent - if switches_to_add: - _sw_by_ip = {sw.fabric_management_ip: sw for sw in switches_to_add} - for ip in _bulk_added_ips: - sw_data = _sw_by_ip.get(ip) - if sw_data: - self.sent.add(sw_data) - - have_migration_switches = False - if migration_switches: - have_migration_switches = True - - for mig_sw in migration_switches: - cfg = config_by_ip.get(mig_sw.fabric_management_ip) - if cfg and mig_sw.switch_id: - switch_actions.append((mig_sw.switch_id, cfg)) - self._log_operation("migrate", mig_sw.fabric_management_ip) - - if not switch_actions: - self.log.info("No switch actions to process after add/migration collection") - if idempotent_save_req: - self.log.info("Config save and deploy required for out-of-sync idempotent switch(es)") - self.fabric_ops.finalize() - return - - # Common post-processing for all switches (new + migration) - # Brownfield optimisation: if every switch in this batch uses - # preserve_config=True the switches will NOT reload after being - # added to the fabric. Passing this flag lets the wait utility - # skip the unreachable/reload detection phases. 
- all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) - if all_preserve_config: - self.log.info("All switches in batch are brownfield (preserve_config=True) — reload detection will be skipped") - - self.fabric_ops.post_add_processing( - switch_actions, - wait_utils=self.wait_utils, - context="merged", - all_preserve_config=all_preserve_config, - update_roles=have_migration_switches, - ) - self.log.debug("EXIT: _handle_merged_state() - completed") - - # ----------------------------------------------------------------- - # Merged-state sub-handlers (modular phases) - # ----------------------------------------------------------------- - - def _merged_handle_idempotent( - self, - diff: Dict[str, List[SwitchDataModel]], - existing_by_ip: Dict[str, SwitchDataModel], - ) -> bool: - """Handle idempotent switches that may need config save and deploy. - - If configSyncStatus is anything other than inSync, run config save - and deploy to bring the switch back in sync. - - Args: - diff: Categorized switch diff output. - existing_by_ip: Existing switch lookup by management IP. - - Returns: - bool: True if any idempotent switches require config save and deploy, False otherwise. 
- """ - idempotent_switches = diff.get("idempotent", []) - if not idempotent_switches: - return False - - for sw in idempotent_switches: - existing_sw = existing_by_ip.get(sw.fabric_management_ip) - status = existing_sw.additional_data.config_sync_status if existing_sw and existing_sw.additional_data else None - if status != ConfigSyncStatus.IN_SYNC: - self.log.info( - "Switch %s (%s) is config-idempotent but configSyncStatus is '%s' — will run config save and deploy", - sw.fabric_management_ip, - sw.switch_id, - getattr(status, "value", status) if status else "unknown", + continue + self.fabric_ops.bulk_add( + switches=pairs, + username=username, + password=password, + auth_proto=auth_proto, + platform_type=platform_type, + preserve_config=preserve_config, ) - return True - - return False - - def _merged_handle_to_update( - self, - diff: Dict[str, List[SwitchDataModel]], - ) -> None: - """Fail the module if switches require field-level updates. + for cfg, disc in pairs: + sn = disc.get("serialNumber") + if sn: + switch_actions.append((sn, cfg)) + self._log_operation("add", cfg.seed_ip) + + # Migration-mode switches — no add needed, but role + finalize applies + for cfg in plan.migration_mode: + sw = existing_by_ip.get(cfg.seed_ip) + if sw and sw.switch_id: + switch_actions.append((sw.switch_id, cfg)) + self._log_operation("migrate", cfg.seed_ip) + + if switch_actions: + all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) + if all_preserve_config: + self.log.info("All switches brownfield (preserve_config=True) — reload detection skipped") + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="merged", + all_preserve_config=all_preserve_config, + update_roles=have_migration, + ) + elif idempotent_save_req: + self.log.info("No adds/migrations but config-sync required — running finalize") + self.fabric_ops.finalize() - Merged state does not support in-place updates beyond role changes. 
- Use overridden state which performs delete-and-re-add. + # --- POAP / preprovision / swap / RMA ----------------------------------- + # normal_readd was already processed via bulk_add above. + # Only route the pure POAP-workflow configs to the handler. + poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap + if poap_workflow_configs: + self.poap_handler.handle(poap_workflow_configs, list(self.existing)) + if plan.to_rma: + self.rma_handler.handle(plan.to_rma, list(self.existing)) - Args: - diff: Categorized switch diff output. - - Returns: - None. - """ - to_update = diff.get("to_update", []) - if not to_update: - return - - ips = [sw.fabric_management_ip for sw in to_update] - self.nd.module.fail_json( - msg=( - f"Switches require updates that are not supported in merged state. " - f"Use 'overridden' state for in-place updates. " - f"Affected switches: {ips}" - ) - ) + self.log.debug("EXIT: _handle_merged_state()") def _handle_overridden_state( self, - diff: Dict[str, List[SwitchDataModel]], - proposed_config: List[SwitchConfigModel], - discovered_data: Optional[Dict[str, Dict[str, Any]]] = None, + plan: "SwitchPlan", + discovered_data: Dict[str, Dict[str, Any]], ) -> None: """Handle overridden-state reconciliation for the fabric. + Reconciles the fabric to match exactly the desired config. Switches + in the fabric that have no config entry are deleted. POAP/preprovision + switches at ``plan.poap_ips`` are excluded from the cleanup sweep. + Normal switches with field differences are deleted and re-added. + Args: - diff: Categorized switch diff output. - proposed_config: Validated switch config list. - discovered_data: Optional discovery data by seed IP. + plan: Unified action plan from :meth:`SwitchDiffEngine.compute_changes`. + discovered_data: Discovery data keyed by seed IP. Returns: None. 
@@ -2699,68 +2802,65 @@ def _handle_overridden_state( self.log.debug("ENTER: _handle_overridden_state()") self.log.info("Handling overridden state") - if not self.proposed: - self.log.warning("No configurations provided for overridden state") + has_work = bool( + plan.to_add or plan.to_update or plan.to_delete or plan.migration_mode + or plan.to_bootstrap or plan.normal_readd or plan.to_preprovision or plan.to_swap + ) + if not has_work and not self.proposed: + self.log.info("overridden: nothing to do") return - # Check mode — preview only + # Check mode if self.nd.module.check_mode: - n_delete = len(diff.get("to_delete", [])) - n_update = len(diff.get("to_update", [])) - n_add = len(diff.get("to_add", [])) - n_migrate = len(diff.get("migration_mode", [])) self.log.info( - "Check mode: would delete %s, delete-and-re-add %s, add %s, migrate %s", - n_delete, - n_update, - n_add, - n_migrate, + "Check mode: delete_orphans=%s, update=%s, add=%s, migrate=%s, " + "bootstrap=%s, readd=%s, preprov=%s, swap=%s", + len(plan.to_delete), len(plan.to_update), len(plan.to_add), + len(plan.migration_mode), len(plan.to_bootstrap), len(plan.normal_readd), + len(plan.to_preprovision), len(plan.to_swap), ) self.results.action = "override" self.results.state = self.state self.results.operation_type = OperationType.CREATE - self.results.response_current = { - "MESSAGE": "check mode — skipped", - "RETURN_CODE": 200, - } + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { - "to_delete": n_delete, - "to_update": n_update, - "to_add": n_add, - "migration_mode": n_migrate, + "to_delete": len(plan.to_delete) + len(plan.to_delete_existing), + "to_update": len(plan.to_update), + "to_add": len(plan.to_add), + "migration_mode": len(plan.migration_mode), + "bootstrap": len(plan.to_bootstrap), + "normal_readd": len(plan.normal_readd), + "preprovision": 
len(plan.to_preprovision), + "swap": len(plan.to_swap), } self.results.register_api_call() return - switches_to_delete: List[SwitchDataModel] = [] + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} - # Phase 1: Switches not in proposed config - for sw in diff.get("to_delete", []): - self.log.info( - "Marking for deletion (not in proposed): %s (%s)", - sw.fabric_management_ip, - sw.switch_id, - ) - switches_to_delete.append(sw) + # --- Phase 1: Combined delete ------------------------------------------- + # Merge three sources of deletions into one bulk_delete call: + # a) Orphans (in fabric, not in any config) + # b) POAP/preprovision mismatches (to_delete_existing from compute_changes) + # c) Normal switches that need field updates (to_update) + switches_to_delete: List[SwitchDataModel] = list(plan.to_delete) + for sw in plan.to_delete: self._log_operation("delete", sw.fabric_management_ip) - # Phase 2: Switches that need updating (delete-then-re-add) - for sw in diff.get("to_update", []): - existing_sw = next( - (e for e in self.existing if e.switch_id == sw.switch_id or e.fabric_management_ip == sw.fabric_management_ip), - None, - ) - if existing_sw: - self.log.info( - "Marking for deletion (re-add update): %s (%s)", - existing_sw.fabric_management_ip, - existing_sw.switch_id, - ) - switches_to_delete.append(existing_sw) - self._log_operation("delete_for_update", existing_sw.fabric_management_ip) + for sw in plan.to_delete_existing: + self.log.info("Deleting POAP/preprovision mismatch %s before re-add", sw.fabric_management_ip) + switches_to_delete.append(sw) + self._log_operation("delete", sw.fabric_management_ip) - diff["to_add"].append(sw) + update_ips: set = set() + for cfg in plan.to_update: + sw = existing_by_ip.get(cfg.seed_ip) + if sw: + self.log.info("Deleting normal switch %s for field update re-add", cfg.seed_ip) + switches_to_delete.append(sw) + update_ips.add(cfg.seed_ip) + self._log_operation("delete_for_update", cfg.seed_ip) 
if switches_to_delete: try: @@ -2772,24 +2872,76 @@ def _handle_overridden_state( for sw in switches_to_delete: self.sent.add(sw) - diff["to_update"] = [] - - # Phase 3: Re-discover switches that were just deleted (they were - # skipped during initial discovery because they were already in the - # fabric). - update_ips = {sw.fabric_management_ip for sw in switches_to_delete} - configs_needing_rediscovery = [cfg for cfg in proposed_config if cfg.seed_ip in update_ips] - if configs_needing_rediscovery: + # --- Phase 2: Re-discover updated normal switches ----------------------- + # to_update configs were already discovered (they were in-fabric) but + # we deleted them; re-discover so bulk_add has current data. + re_discover_configs = [cfg for cfg in plan.to_update if cfg.seed_ip in update_ips] + if re_discover_configs: self.log.info( - "Re-discovering %s switch(es) after deletion for re-add: %s", - len(configs_needing_rediscovery), - [cfg.seed_ip for cfg in configs_needing_rediscovery], + "Re-discovering %s updated switch(es) after deletion", + len(re_discover_configs), + ) + fresh = self.discovery.discover(re_discover_configs) + discovered_data = {**discovered_data, **fresh} + + # --- Phase 3: Combined add (normal to_add + to_update + normal_readd) --- + add_configs = plan.to_add + plan.to_update + plan.normal_readd + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + have_migration = bool(plan.migration_mode) + + if add_configs and discovered_data: + credential_groups = group_switches_by_credentials(add_configs, self.log) + for group_key, group_switches in credential_groups.items(): + username, _pw_hash, auth_proto, platform_type, preserve_config = group_key + password = group_switches[0].password + pairs = [ + (cfg, discovered_data[cfg.seed_ip]) + for cfg in group_switches + if cfg.seed_ip in discovered_data + ] + if not pairs: + self.log.warning( + "No discovery data for group %s — skipping", + [cfg.seed_ip for cfg in group_switches], + ) + continue + 
self.fabric_ops.bulk_add( + switches=pairs, + username=username, + password=password, + auth_proto=auth_proto, + platform_type=platform_type, + preserve_config=preserve_config, + ) + for cfg, disc in pairs: + sn = disc.get("serialNumber") + if sn: + switch_actions.append((sn, cfg)) + self._log_operation("add", cfg.seed_ip) + + for cfg in plan.migration_mode: + sw = existing_by_ip.get(cfg.seed_ip) + if sw and sw.switch_id: + switch_actions.append((sw.switch_id, cfg)) + self._log_operation("migrate", cfg.seed_ip) + + if switch_actions: + all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="overridden", + all_preserve_config=all_preserve_config, + update_roles=have_migration, ) - fresh_discovered = self.discovery.discover(configs_needing_rediscovery) - discovered_data = {**(discovered_data or {}), **fresh_discovered} - # Phase 4: Delegate add + migration to merged state - self._handle_merged_state(diff, proposed_config, discovered_data) + # --- Phase 4: POAP workflows (bootstrap / preprovision / swap) ---------- + # plan.to_delete_existing was deleted in Phase 1. + # Route pure POAP-workflow configs to the handler. + poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap + if poap_workflow_configs: + self.poap_handler.handle(poap_workflow_configs, list(self.existing)) + self.log.debug("EXIT: _handle_overridden_state()") def _handle_gathered_state(self) -> None: @@ -2829,8 +2981,14 @@ def _handle_deleted_state( ) -> None: """Handle deleted-state switch removal. + Matches switches to delete by ``seed_ip`` and optionally ``role``. + POAP/preprovision sub-config blocks (``poap``, ``preprovision``) are + ignored; only ``seed_ip`` and ``role`` matter. When no config is + provided, all switches in the fabric are deleted. + Args: proposed_config: Optional config list that limits deletion scope. 
+ Pass ``None`` to delete all switches. Returns: None. @@ -2847,39 +3005,39 @@ def _handle_deleted_state( for sw in switches_to_delete: self._log_operation("delete", sw.fabric_management_ip) else: + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} switches_to_delete: List[SwitchDataModel] = [] - for switch_config in proposed_config: - identifier = switch_config.seed_ip - self.log.debug("Looking for switch to delete with seed IP: %s", identifier) - existing_switch = next( - (sw for sw in self.existing if sw.fabric_management_ip == identifier), - None, - ) - if existing_switch: + for cfg in proposed_config: + existing_sw = existing_by_ip.get(cfg.seed_ip) + if not existing_sw: + self.log.info("deleted: switch %s not in fabric — skipping", cfg.seed_ip) + continue + # Role filter: if config specifies a role, only delete if it matches + if cfg.role is not None and cfg.role != existing_sw.switch_role: self.log.info( - "Marking for deletion: %s (%s)", - identifier, - existing_switch.switch_id, + "deleted: switch %s role mismatch (config=%s, fabric=%s) — skipping", + cfg.seed_ip, cfg.role, existing_sw.switch_role, ) - switches_to_delete.append(existing_switch) - else: - self.log.info("Switch not found for deletion: %s", identifier) + continue + self.log.info( + "deleted: marking %s (%s) for deletion", + cfg.seed_ip, existing_sw.switch_id, + ) + switches_to_delete.append(existing_sw) + self._log_operation("delete", cfg.seed_ip) self.log.info("Total switches marked for deletion: %s", len(switches_to_delete)) if not switches_to_delete: self.log.info("No switches to delete") return - # Check mode — preview only + # Check mode if self.nd.module.check_mode: self.log.info("Check mode: would delete %s switch(es)", len(switches_to_delete)) self.results.action = "delete" self.results.state = self.state self.results.operation_type = OperationType.DELETE - self.results.response_current = { - "MESSAGE": "check mode — skipped", - "RETURN_CODE": 200, - } + 
self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} self.results.result_current = {"success": True, "changed": False} self.results.diff_current = { "to_delete": [sw.fabric_management_ip for sw in switches_to_delete], diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 2663cb51..b11d67f8 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -30,7 +30,7 @@ state: description: - The state of ND and switch(es) after module completion. - - C(merged) is the only state supported for POAP. + - C(merged) and C(overridden) are supported for POAP and pre-provision operations. - C(merged) is the only state supported for RMA. - C(gathered) reads the current fabric inventory and returns it in the C(gathered) key in config format. No changes are made. @@ -363,14 +363,6 @@ rma: - old_serial_number: SAL1234ABCD new_serial_number: SAL9999ZZZZ - model: N9K-C93180YC-EX - version: "10.3(1)" - hostname: leaf-replaced - image_policy: my-image-policy - ip: 192.168.10.50 - gateway_ip: 192.168.10.1/24 - discovery_username: root - discovery_password: "{{ discovery_password }}" state: merged - name: Remove switches from fabric From 7f7ddc24b92d9988e55519623d532e7503a72493 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Wed, 1 Apr 2026 15:31:51 +0530 Subject: [PATCH 087/109] Black Formatting Fix --- plugins/module_utils/utils.py | 29 +++++++---------------------- 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 76685d43..355c41a7 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -157,14 +157,10 @@ def save_config( Raises: SwitchOperationError: If all attempts fail. 
""" - last_error: Exception = SwitchOperationError( - f"Config save produced no attempts for fabric {self.fabric}" - ) + last_error: Exception = SwitchOperationError(f"Config save produced no attempts for fabric {self.fabric}") for attempt in range(1, max_retries + 1): try: - response = self._request_endpoint( - self.ep_config_save, action="Config save" - ) + response = self._request_endpoint(self.ep_config_save, action="Config save") self.log.info( "Config save succeeded on attempt %s/%s for fabric %s", attempt, @@ -189,10 +185,7 @@ def save_config( max_retries, ) time.sleep(retry_delay) - raise SwitchOperationError( - f"Config save failed after {max_retries} attempt(s) " - f"for fabric {self.fabric}: {last_error}" - ) + raise SwitchOperationError(f"Config save failed after {max_retries} attempt(s) " f"for fabric {self.fabric}: {last_error}") def deploy_config(self) -> Dict[str, Any]: """Deploy pending configuration to all switches in the fabric. @@ -206,9 +199,7 @@ def deploy_config(self) -> Dict[str, Any]: Raises: SwitchOperationError: If the deploy request fails. """ - return self._request_endpoint( - self.ep_config_deploy, action="Config deploy" - ) + return self._request_endpoint(self.ep_config_deploy, action="Config deploy") def get_fabric_info(self) -> Dict[str, Any]: """Retrieve fabric information. @@ -219,17 +210,13 @@ def get_fabric_info(self) -> Dict[str, Any]: Raises: SwitchOperationError: If the request fails. """ - return self._request_endpoint( - self.ep_fabric_get, action="Get fabric info" - ) + return self._request_endpoint(self.ep_fabric_get, action="Get fabric info") # ----------------------------------------------------------------- # Internal helpers # ----------------------------------------------------------------- - def _request_endpoint( - self, endpoint, action: str = "Request" - ) -> Dict[str, Any]: + def _request_endpoint(self, endpoint, action: str = "Request") -> Dict[str, Any]: """Execute a request against a pre-configured endpoint. 
Args: @@ -249,6 +236,4 @@ def _request_endpoint( return response except Exception as e: self.log.error("%s failed for fabric %s: %s", action, self.fabric, e) - raise SwitchOperationError( - f"{action} failed for fabric {self.fabric}: {e}" - ) from e + raise SwitchOperationError(f"{action} failed for fabric {self.fabric}: {e}") from e From 033851705db6bc56f67459b541f5b1c43faaf786 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 2 Apr 2026 00:41:44 +0530 Subject: [PATCH 088/109] Doc Fixes + Config Model Streamlining for POAP/RMA --- .../manage_switches/nd_switch_resources.py | 161 +++++++++--------- .../manage_switches/bootstrap_models.py | 22 +-- .../models/manage_switches/config_models.py | 78 +++------ .../models/manage_switches/rma_models.py | 19 ++- plugins/modules/nd_manage_switches.py | 57 ++----- .../nd_manage_switches/tests/nd/rma.yaml | 12 -- 6 files changed, 136 insertions(+), 213 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index d75bd37f..7512817d 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -1577,7 +1577,7 @@ def _build_bootstrap_import_model( discovery_password = getattr(poap_cfg, "discovery_password", None) # model, version and config_data always come from the bootstrap API for - # bootstrap-only operations. POAP no longer carries these fields. + # bootstrap-only operations. 
model = bs.get("model", "") version = bs.get("softwareVersion", "") @@ -1626,7 +1626,7 @@ def _build_bootstrap_import_model( data_block["models"] = data_models # Bootstrap API response fields - fingerprint = bs.get("fingerPrint", bs.get("fingerprint", "")) + fingerprint = bs.get("fingerPrint") or bs.get("fingerprint", "") public_key = bs.get("publicKey", "") re_add = bs.get("reAdd", False) in_inventory = bs.get("inInventory", False) @@ -2094,10 +2094,10 @@ def handle( log.info("Found %s RMA entry/entries to process", len(rma_entries)) - # Validate old switches exist and are in correct state + # Validate old switches exist and are in correct state; look up by seed_ip old_switch_info = self._validate_prerequisites(rma_entries, existing) - # Query bootstrap API for publicKey / fingerPrint of new switches + # Query bootstrap API for new switch data bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) bootstrap_idx = build_bootstrap_index(bootstrap_switches) log.debug( @@ -2108,9 +2108,9 @@ def handle( # Build and submit each RMA request switch_actions: List[Tuple[str, SwitchConfigModel]] = [] - rma_diff_data: List[Tuple[str, str, SwitchConfigModel]] = [] # (new_serial, old_serial, switch_cfg) for switch_cfg, rma_cfg in rma_entries: new_serial = rma_cfg.new_serial_number + old_serial = old_switch_info[switch_cfg.seed_ip]["old_serial"] bootstrap_data = bootstrap_idx.get(new_serial) if not bootstrap_data: @@ -2123,32 +2123,20 @@ def handle( log.error(msg) nd.module.fail_json(msg=msg) - SwitchDiffEngine.validate_switch_api_fields( - nd=nd, - serial=rma_cfg.new_serial_number, - model=rma_cfg.model, - version=rma_cfg.version, - config_data=rma_cfg.config_data, - bootstrap_data=bootstrap_data, - log=log, - context="RMA", - ) - rma_model = self._build_rma_model( switch_cfg, rma_cfg, bootstrap_data, - old_switch_info[rma_cfg.old_serial_number], + old_switch_info[switch_cfg.seed_ip], ) log.info( "Built RMA model: replacing %s with %s", - 
rma_cfg.old_serial_number, + old_serial, rma_model.new_switch_id, ) - self._provision_rma_switch(rma_cfg.old_serial_number, rma_model) + self._provision_rma_switch(rma_model) switch_actions.append((rma_model.new_switch_id, switch_cfg)) - rma_diff_data.append((rma_model.new_switch_id, rma_cfg.old_serial_number, switch_cfg)) # Post-processing: wait for RMA switches to become ready, then # save credentials and finalize. RMA switches come up via POAP @@ -2186,46 +2174,48 @@ def _validate_prerequisites( ) -> Dict[str, Dict[str, Any]]: """Validate RMA prerequisites for each requested replacement. + Looks up the switch to be replaced by ``seed_ip`` (the fabric management + IP). The serial number of the old switch is derived from inventory — + it is not required in the playbook config. + Args: rma_entries: ``(SwitchConfigModel, RMAConfigModel)`` pairs. existing: Current fabric inventory snapshot. Returns: - Dict keyed by old serial with prerequisite metadata. + Dict keyed by ``seed_ip`` with prerequisite metadata including + ``old_serial``, ``hostname``, and ``switch_data``. """ nd = self.ctx.nd log = self.ctx.log log.debug("ENTER: _validate_prerequisites()") - existing_by_serial: Dict[str, SwitchDataModel] = {sw.serial_number: sw for sw in existing if sw.serial_number} + existing_by_ip: Dict[str, SwitchDataModel] = { + sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip + } result: Dict[str, Dict[str, Any]] = {} - for switch_cfg, rma_cfg in rma_entries: - old_serial = rma_cfg.old_serial_number + for switch_cfg, _rma_cfg in rma_entries: + seed_ip = switch_cfg.seed_ip - old_switch = existing_by_serial.get(old_serial) + old_switch = existing_by_ip.get(seed_ip) if old_switch is None: nd.module.fail_json( msg=( - f"RMA: old_serial '{old_serial}' not found in " - f"fabric '{self.ctx.fabric}'. The switch being " - f"replaced must exist in the inventory." + f"RMA: seed_ip '{seed_ip}' not found in " + f"fabric '{self.ctx.fabric}' inventory. 
The switch " + f"being replaced must exist in the fabric." ) ) - # Verify the seed_ip in config matches the IP of the switch - # identified by old_serial in the fabric inventory. - seed_ip = switch_cfg.seed_ip - inventory_ip = old_switch.fabric_management_ip - if seed_ip != inventory_ip: + old_serial = old_switch.serial_number or old_switch.switch_id + if not old_serial: nd.module.fail_json( msg=( - f"RMA: seed_ip '{seed_ip}' does not match the " - f"fabric management IP '{inventory_ip}' of switch " - f"with serial '{old_serial}'. Verify that seed_ip " - f"and old_serial refer to the same switch." + f"RMA: Switch at '{seed_ip}' has no serial number in " + f"the inventory response." ) ) @@ -2233,14 +2223,16 @@ def _validate_prerequisites( if ad is None: nd.module.fail_json( msg=( - f"RMA: Switch '{old_serial}' has no additional data " f"in the inventory response. Cannot verify discovery " f"status and system mode." + f"RMA: Switch at '{seed_ip}' (serial '{old_serial}') has no " + f"additional data in the inventory response. Cannot verify " + f"discovery status and system mode." ) ) if ad.discovery_status != DiscoveryStatus.UNREACHABLE: nd.module.fail_json( msg=( - f"RMA: Switch '{old_serial}' has discovery status " + f"RMA: Switch at '{seed_ip}' (serial '{old_serial}') has discovery status " f"'{getattr(ad.discovery_status, 'value', ad.discovery_status) if ad.discovery_status else 'unknown'}', " f"expected 'unreachable'. The old switch must be " f"unreachable before RMA can proceed." @@ -2250,21 +2242,22 @@ def _validate_prerequisites( if ad.system_mode != SystemMode.MAINTENANCE: nd.module.fail_json( msg=( - f"RMA: Switch '{old_serial}' is in " + f"RMA: Switch at '{seed_ip}' (serial '{old_serial}') is in " f"'{getattr(ad.system_mode, 'value', ad.system_mode) if ad.system_mode else 'unknown'}' " f"mode, expected 'maintenance'. Put the switch in " f"maintenance mode before initiating RMA." 
) ) - result[old_serial] = { + result[seed_ip] = { + "old_serial": old_serial, "hostname": old_switch.hostname or "", "switch_data": old_switch, } log.info( - "RMA prerequisite check passed for old_serial '%s' (hostname=%s, discovery=%s, mode=%s)", + "RMA prerequisite check passed for '%s' (serial=%s, discovery=%s, mode=%s)", + seed_ip, old_serial, - old_switch.hostname, ad.discovery_status, ad.system_mode, ) @@ -2281,75 +2274,73 @@ def _build_rma_model( ) -> RMASwitchModel: """Build an RMA model from config and bootstrap data. + All switch properties (model, version, gateway, modules) are sourced + exclusively from the bootstrap API response. Only the new serial number, + optional image policy, and optional discovery credentials come from the + playbook config. + Args: switch_cfg: Parent switch config. rma_cfg: RMA config entry. bootstrap_data: Bootstrap response entry for the replacement switch. - old_switch_info: Prerequisite metadata for the switch being replaced. + old_switch_info: Prerequisite metadata keyed from _validate_prerequisites. Returns: Completed ``RMASwitchModel`` for API submission. 
""" log = self.ctx.log + old_serial = old_switch_info["old_serial"] log.debug( "ENTER: _build_rma_model(new=%s, old=%s)", rma_cfg.new_serial_number, - rma_cfg.old_serial_number, + old_serial, ) - # User config fields - new_switch_id = rma_cfg.new_serial_number - hostname = old_switch_info.get("hostname", "") - ip = switch_cfg.seed_ip - image_policy = rma_cfg.image_policy - switch_role = switch_cfg.role - password = switch_cfg.password - auth_proto = SnmpV3AuthProtocol.MD5 # RMA always uses MD5 - - discovery_username = rma_cfg.discovery_username - discovery_password = rma_cfg.discovery_password - - # Bootstrap API response fields - public_key = bootstrap_data.get("publicKey", "") - finger_print = bootstrap_data.get("fingerPrint", bootstrap_data.get("fingerprint", "")) bs_data = bootstrap_data.get("data") or {} - # Use user-provided values when available; fall back to bootstrap API data. - model_name = rma_cfg.model or bootstrap_data.get("model", "") - version = rma_cfg.version or bootstrap_data.get("softwareVersion", "") - gateway_ip_mask = (rma_cfg.config_data.gateway if rma_cfg.config_data else None) or bootstrap_data.get("gatewayIpMask") or bs_data.get("gatewayIpMask") - data_models = (rma_cfg.config_data.models if rma_cfg.config_data else None) or bs_data.get("models", []) + gateway_ip_mask = bootstrap_data.get("gatewayIpMask") or bs_data.get("gatewayIpMask", "") + data_models = bs_data.get("models", []) + model = bootstrap_data.get("model", "") + software_version = bootstrap_data.get("softwareVersion", "") + public_key = bootstrap_data.get("publicKey", "") + finger_print = bootstrap_data.get("fingerPrint") or bootstrap_data.get("fingerprint", "") rma_model = RMASwitchModel( gatewayIpMask=gateway_ip_mask, - model=model_name, - softwareVersion=version, - imagePolicy=image_policy, - switchRole=switch_role, - password=password, - discoveryAuthProtocol=auth_proto, - discoveryUsername=discovery_username, - discoveryPassword=discovery_password, - hostname=hostname, - 
ip=ip, - newSwitchId=new_switch_id, + model=model, + softwareVersion=software_version, + imagePolicy=rma_cfg.image_policy, + switchRole=switch_cfg.role, + password=switch_cfg.password, + discoveryAuthProtocol=SnmpV3AuthProtocol.MD5, + discoveryUsername=rma_cfg.discovery_username, + discoveryPassword=rma_cfg.discovery_password, + hostname=old_switch_info.get("hostname", ""), + ip=switch_cfg.seed_ip, + newSwitchId=rma_cfg.new_serial_number, + oldSwitchId=old_serial, publicKey=public_key, fingerPrint=finger_print, - data=({"gatewayIpMask": gateway_ip_mask, "models": data_models} if (gateway_ip_mask or data_models) else None), + data=( + {"gatewayIpMask": gateway_ip_mask, "models": data_models} + if (gateway_ip_mask or data_models) + else None + ), ) - log.debug("EXIT: _build_rma_model() -> newSwitchId=%s", rma_model.new_switch_id) + log.debug("EXIT: _build_rma_model() -> newSwitchId=%s, oldSwitchId=%s", rma_model.new_switch_id, old_serial) return rma_model def _provision_rma_switch( self, - old_switch_id: str, rma_model: RMASwitchModel, ) -> None: """Submit an RMA provisioning request for one switch. + The old and new switch IDs are embedded in the payload via + ``oldSwitchId`` and ``newSwitchId`` fields on the model. + Args: - old_switch_id: Identifier of the switch being replaced. rma_model: RMA model for the replacement switch. 
Returns: @@ -2363,18 +2354,18 @@ def _provision_rma_switch( endpoint = EpManageFabricsSwitchProvisionRMAPost() endpoint.fabric_name = self.ctx.fabric - endpoint.switch_sn = old_switch_id + endpoint.switch_sn = rma_model.old_switch_id payload = rma_model.to_payload() - log.info("RMA: Replacing %s with %s", old_switch_id, rma_model.new_switch_id) + log.info("RMA: Replacing %s with %s", rma_model.old_switch_id, rma_model.new_switch_id) log.debug("RMA endpoint: %s", endpoint.path) log.debug("RMA payload (masked): %s", mask_password(payload)) try: nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) except Exception as e: - msg = f"RMA provision API call failed for " f"{old_switch_id} → {rma_model.new_switch_id}: {e}" + msg = f"RMA provision API call failed for {rma_model.old_switch_id} → {rma_model.new_switch_id}: {e}" log.error(msg) nd.module.fail_json(msg=msg) @@ -2386,13 +2377,13 @@ def _provision_rma_switch( results.response_current = response results.result_current = result results.diff_current = { - "old_switch_id": old_switch_id, + "old_switch_id": rma_model.old_switch_id, "new_switch_id": rma_model.new_switch_id, } results.register_api_call() if not result.get("success"): - msg = f"RMA provision failed for {old_switch_id} → " f"{rma_model.new_switch_id}: {response}" + msg = f"RMA provision failed for {rma_model.old_switch_id} → {rma_model.new_switch_id}: {response}" log.error(msg) nd.module.fail_json(msg=msg) diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index f413a9f6..21ebd6f2 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -58,10 +58,10 @@ class BootstrapBaseModel(NDBaseModel): identifiers: ClassVar[List[str]] = [] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" - gateway_ip_mask: str 
= Field(..., alias="gatewayIpMask", description="Gateway IP address with mask") - model: str = Field(..., description="Model of the bootstrap switch") - software_version: str = Field( - ..., + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", description="Gateway IP address with mask") + model: Optional[str] = Field(default=None, description="Model of the bootstrap switch") + software_version: Optional[str] = Field( + default=None, alias="softwareVersion", description="Software version of the bootstrap switch", ) @@ -75,10 +75,12 @@ class BootstrapBaseModel(NDBaseModel): @field_validator("gateway_ip_mask", mode="before") @classmethod - def validate_gateway(cls, v: str) -> str: + def validate_gateway(cls, v: Optional[str]) -> Optional[str]: + if v is None: + return None result = SwitchValidators.validate_cidr(v) if result is None: - raise ValueError("gateway_ip_mask cannot be empty") + raise ValueError("gateway_ip_mask is not a valid CIDR") return result @@ -196,9 +198,9 @@ class BootstrapImportSwitchModel(NDBaseModel): exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] serial_number: str = Field(..., alias="serialNumber", description="Serial number of the bootstrap switch") - model: str = Field(..., description="Model of the bootstrap switch") - software_version: str = Field( - ..., + model: Optional[str] = Field(default=None, description="Model of the bootstrap switch") + software_version: Optional[str] = Field( + default=None, alias="softwareVersion", description="Software version of the bootstrap switch", ) @@ -244,7 +246,7 @@ class BootstrapImportSwitchModel(NDBaseModel): description="Image policy associated with the switch during bootstrap", ) switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") - gateway_ip_mask: str = Field(..., alias="gatewayIpMask", description="Gateway IP address with mask") + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", 
description="Gateway IP address with mask") @field_validator("ip", mode="before") @classmethod diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 2e8daf61..b953cf6f 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -207,70 +207,45 @@ class RMAConfigModel(NDNestedModel): """ RMA configuration entry for replacing a single switch via bootstrap. + The old switch is identified from the fabric inventory using ``seed_ip``. + All switch properties (model, version, gateway, modules) are sourced from + the bootstrap API at runtime — only the new serial number is required. + The switch being replaced must be in maintenance mode and either shut down or disconnected from the network before initiating the RMA operation. """ identifiers: ClassVar[List[str]] = [] - # Discovery credentials - discovery_username: Optional[str] = Field( - default=None, - alias="discoveryUsername", - description="Username for device discovery during POAP and RMA discovery", - ) - discovery_password: Optional[str] = Field( - default=None, - alias="discoveryPassword", - description="Password for device discovery during POAP and RMA discovery", - ) - - # Required fields for RMA + # Required new_serial_number: str = Field( ..., alias="newSerialNumber", min_length=1, - description="Serial number of the new/replacement switch to Bootstrap for RMA", - ) - old_serial_number: str = Field( - ..., - alias="oldSerialNumber", - min_length=1, - description="Serial number of the existing switch to be replaced by RMA", - ) - model: Optional[str] = Field( - default=None, - min_length=1, - description="Model of switch to Bootstrap for RMA. If omitted, sourced from bootstrap API.", - ) - version: Optional[str] = Field( - default=None, - min_length=1, - description="Software version of switch to Bootstrap for RMA. 
If omitted, sourced from bootstrap API.", + description="Serial number of the replacement switch to bootstrap for RMA", ) - # Optional fields + # Optional image_policy: Optional[str] = Field( default=None, alias="imagePolicy", - description="Name of the image policy to be applied on switch during Bootstrap for RMA", + description="Name of the image policy to be applied on the replacement switch", ) - - # Optional config data for RMA (models list + gateway); sourced from bootstrap API if omitted - config_data: Optional[ConfigDataModel] = Field( + discovery_username: Optional[str] = Field( default=None, - alias="configData", - description=( - "Basic config data of switch to Bootstrap for RMA. " - "'models' (list of module models) and 'gateway' (IP with mask) are mandatory " - "when provided. If omitted, sourced from bootstrap API." - ), + alias="discoveryUsername", + description="Username for device discovery during RMA bootstrap", + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password for device discovery during RMA bootstrap", ) - @field_validator("new_serial_number", "old_serial_number", mode="before") + @field_validator("new_serial_number", mode="before") @classmethod def validate_serial_numbers(cls, v: str) -> str: - """Validate serial numbers are not empty.""" + """Validate new_serial_number is not empty.""" result = SwitchValidators.validate_serial_number(v) if result is None: raise ValueError("Serial number cannot be empty") @@ -278,12 +253,7 @@ def validate_serial_numbers(cls, v: str) -> str: @model_validator(mode="after") def validate_discovery_credentials_pair(self) -> "RMAConfigModel": - """Validate that discovery_username and discovery_password are both set or both absent. 
- - Mirrors the dcnm_inventory.py bidirectional check: - - discovery_username set → discovery_password required - - discovery_password set → discovery_username required - """ + """Validate that discovery_username and discovery_password are both set or both absent.""" has_user = bool(self.discovery_username) has_pass = bool(self.discovery_password) if has_user and not has_pass: @@ -707,19 +677,9 @@ def get_argument_spec(cls) -> Dict[str, Any]: elements="dict", options=dict( new_serial_number=dict(type="str", required=True), - old_serial_number=dict(type="str", required=True), discovery_username=dict(type="str"), discovery_password=dict(type="str", no_log=True), - model=dict(type="str"), - version=dict(type="str"), image_policy=dict(type="str"), - config_data=dict( - type="dict", - options=dict( - models=dict(type="list", elements="str"), - gateway=dict(type="str"), - ), - ), ), ), ), diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py index 88c00571..08ab8e06 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -43,11 +43,11 @@ class RMASwitchModel(NDBaseModel): identifiers: ClassVar[List[str]] = ["new_switch_id"] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] - # From bootstrapBase - gateway_ip_mask: str = Field(..., alias="gatewayIpMask", description="Gateway IP address with mask") - model: str = Field(..., description="Model of the bootstrap switch") - software_version: str = Field( - ..., + # From bootstrapBase (all sourced from bootstrap API, not user config) + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", description="Gateway IP address with mask") + model: Optional[str] = Field(default=None, description="Model of the bootstrap 
switch") + software_version: Optional[str] = Field( + default=None, alias="softwareVersion", description="Software version of the bootstrap switch", ) @@ -69,7 +69,8 @@ class RMASwitchModel(NDBaseModel): # From RMASpecific hostname: str = Field(..., description="Hostname of the switch") ip: str = Field(..., description="IP address of the switch") - new_switch_id: str = Field(..., alias="newSwitchId", description="SwitchId (serial number) of the switch") + new_switch_id: str = Field(..., alias="newSwitchId", description="SwitchId (serial number) of the replacement switch") + old_switch_id: str = Field(..., alias="oldSwitchId", description="SwitchId (serial number) of the switch being replaced") public_key: str = Field(..., alias="publicKey", description="Public Key") finger_print: str = Field(..., alias="fingerPrint", description="Fingerprint") dhcp_bootstrap_ip: Optional[str] = Field(default=None, alias="dhcpBootstrapIp") @@ -81,10 +82,12 @@ class RMASwitchModel(NDBaseModel): @field_validator("gateway_ip_mask", mode="before") @classmethod - def validate_gateway(cls, v: str) -> str: + def validate_gateway(cls, v: Optional[str]) -> Optional[str]: + if v is None: + return None result = SwitchValidators.validate_cidr(v) if result is None: - raise ValueError("gateway_ip_mask cannot be empty") + raise ValueError("gateway_ip_mask is not a valid CIDR") return result @field_validator("hostname", mode="before") diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index b11d67f8..07638ebc 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -205,58 +205,29 @@ rma: description: - RMA an existing switch with a new one. - - Please note that the existing switch being replaced should be configured, deployed in maintenance mode - and then shutdown (unreachable state). + - The switch being replaced is identified by C(seed_ip). 
+ - The existing switch must be configured, deployed in maintenance mode, + and then shutdown (unreachable state) before initiating RMA. type: list elements: dict suboptions: new_serial_number: description: - - Serial number of switch to Bootstrap for RMA. + - Serial number of the replacement switch in the POAP/bootstrap loop. type: str required: true - old_serial_number: + image_policy: description: - - Serial number of switch to be replaced by RMA. + - Name of the image policy to be applied on the replacement switch. type: str - required: true discovery_username: description: - - Username for device discovery during POAP and RMA discovery. + - Username for device discovery during RMA bootstrap. type: str discovery_password: description: - - Password for device discovery during POAP and RMA discovery. - type: str - model: - description: - - Model of switch to Bootstrap for RMA. - type: str - version: - description: - - Software version of switch to Bootstrap for RMA. - type: str - image_policy: - description: - - Name of the image policy to be applied on switch during Bootstrap for RMA. + - Password for device discovery during RMA bootstrap. type: str - config_data: - description: - - Basic config data of switch to Bootstrap for RMA. - - C(models) and C(gateway) are optional. - - C(models) is list of model of modules in switch to Bootstrap for RMA. - - C(gateway) is the gateway IP with mask for the switch to Bootstrap for RMA. - type: dict - suboptions: - models: - description: - - List of module models in the switch. - type: list - elements: str - gateway: - description: - - Gateway IP with subnet mask (e.g., 192.168.0.1/24). 
- type: str extends_documentation_fragment: - cisco.nd.modules @@ -321,6 +292,9 @@ model: N9K-C93180YC-EX version: "10.3(1)" hostname: leaf-preprov + image_policy: my-image-policy + discovery_username: root + discovery_password: "{{ discovery_password }}" config_data: models: - N9K-C93180YC-EX @@ -338,6 +312,9 @@ poap: serial_number: SAL5678EFGH hostname: leaf-bootstrap + image_policy: my-image-policy + discovery_username: root + discovery_password: "{{ discovery_password }}" state: merged - name: Swap serial number on a pre-provisioned switch (POAP swap) @@ -361,8 +338,10 @@ username: admin password: "{{ switch_password }}" rma: - - old_serial_number: SAL1234ABCD - new_serial_number: SAL9999ZZZZ + - new_serial_number: SAL9999ZZZZ + image_policy: my-image-policy + discovery_username: root + discovery_password: "{{ discovery_password }}" state: merged - name: Remove switches from fabric diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml index 8113ef04..a4daff9a 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml @@ -14,11 +14,6 @@ sw1: "{{ ansible_switch1 }}" sw1_serial: "1ABC23DEFGH" sw1_rma_serial: "1ABC23DERMA" - rma_model: "SW1-K1234v" - rma_version: "12.3(4)" - rma_hostname: "RMA-SW" - rma_configmodel: "['SW1-K1234v']" - rma_gateway: "192.168.2.1/24" deploy: "{{ deploy }}" rma_enabled: false delegate_to: localhost @@ -148,13 +143,6 @@ password: '{{ switch_password }}' rma: - new_serial_number: '{{ test_data.sw1_rma_serial }}' - old_serial_number: '{{ test_data.sw1_serial }}' - model: '{{ test_data.rma_model }}' - version: '{{ test_data.rma_version }}' - hostname: '{{ test_data.rma_hostname }}' - config_data: - models: '{{ test_data.rma_configmodel }}' - gateway: '{{ test_data.rma_gateway }}' register: result - name: ASSERT - Check condition From 
2ab029046b4420c48b48623375534379a6b3ed87 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 2 Apr 2026 00:57:27 +0530 Subject: [PATCH 089/109] Validators Additions + Fix --- .../manage_switches/bootstrap_models.py | 36 +----- .../models/manage_switches/config_models.py | 48 +++---- .../manage_switches/discovery_models.py | 15 +-- .../manage_switches/preprovision_models.py | 27 +--- .../models/manage_switches/rma_models.py | 29 ++--- .../manage_switches/switch_data_models.py | 15 +-- .../models/manage_switches/validators.py | 120 ++++++++++++++++++ 7 files changed, 166 insertions(+), 124 deletions(-) diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index 21ebd6f2..22b3ba9e 100644 --- a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -48,7 +48,7 @@ class BootstrapBaseData(NDNestedModel): @field_validator("gateway_ip_mask", mode="before") @classmethod def validate_gateway(cls, v: Optional[str]) -> Optional[str]: - return SwitchValidators.validate_cidr(v) + return SwitchValidators.validate_cidr_optional(v) class BootstrapBaseModel(NDBaseModel): @@ -76,12 +76,7 @@ class BootstrapBaseModel(NDBaseModel): @field_validator("gateway_ip_mask", mode="before") @classmethod def validate_gateway(cls, v: Optional[str]) -> Optional[str]: - if v is None: - return None - result = SwitchValidators.validate_cidr(v) - if result is None: - raise ValueError("gateway_ip_mask is not a valid CIDR") - return result + return SwitchValidators.validate_cidr_optional(v) class BootstrapCredentialModel(NDBaseModel): @@ -165,25 +160,17 @@ class BootstrapImportSpecificModel(NDBaseModel): @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: - result = SwitchValidators.validate_hostname(v) - if result is None: - raise ValueError("hostname cannot be empty") - return result + return 
SwitchValidators.require_hostname(v) @field_validator("ip", "dhcp_bootstrap_ip", mode="before") @classmethod def validate_ip(cls, v: Optional[str]) -> Optional[str]: - if v is None: - return None return SwitchValidators.validate_ip_address(v) @field_validator("serial_number", mode="before") @classmethod def validate_serial(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("serial_number cannot be empty") - return result + return SwitchValidators.require_serial_number(v) class BootstrapImportSwitchModel(NDBaseModel): @@ -251,26 +238,17 @@ class BootstrapImportSwitchModel(NDBaseModel): @field_validator("ip", mode="before") @classmethod def validate_ip_field(cls, v: str) -> str: - result = SwitchValidators.validate_ip_address(v) - if result is None: - raise ValueError(f"Invalid IP address: {v}") - return result + return SwitchValidators.require_ip_address(v) @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: - result = SwitchValidators.validate_hostname(v) - if result is None: - raise ValueError("hostname cannot be empty") - return result + return SwitchValidators.require_hostname(v) @field_validator("serial_number", mode="before") @classmethod def validate_serial(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("serial_number cannot be empty") - return result + return SwitchValidators.require_serial_number(v) @computed_field(alias="useNewCredentials") @property diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index b953cf6f..0b0edf22 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -117,25 +117,23 @@ class POAPConfigModel(NDNestedModel): description="Name of the image policy to be applied on switch", ) + 
@field_validator("hostname", mode="before") + @classmethod + def validate_hostname_field(cls, v: str) -> str: + """Validate hostname is not empty and well-formed.""" + return SwitchValidators.require_hostname(v) + @model_validator(mode="after") def validate_discovery_credentials_pair(self) -> "POAPConfigModel": """Validate that discovery_username and discovery_password are both set or both absent.""" - has_user = bool(self.discovery_username) - has_pass = bool(self.discovery_password) - if has_user and not has_pass: - raise ValueError("discovery_password must be set when discovery_username is specified") - if has_pass and not has_user: - raise ValueError("discovery_username must be set when discovery_password is specified") + SwitchValidators.check_discovery_credentials_pair(self.discovery_username, self.discovery_password) return self @field_validator("serial_number", mode="before") @classmethod def validate_serial_number_field(cls, v: str) -> str: """Validate serial_number is not empty.""" - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("serial_number cannot be empty") - return result + return SwitchValidators.require_serial_number(v) class PreprovisionConfigModel(NDNestedModel): @@ -182,25 +180,23 @@ class PreprovisionConfigModel(NDNestedModel): description="Image policy to apply during pre-provision", ) + @field_validator("hostname", mode="before") + @classmethod + def validate_hostname_field(cls, v: str) -> str: + """Validate hostname is not empty and well-formed.""" + return SwitchValidators.require_hostname(v) + @model_validator(mode="after") def validate_discovery_credentials_pair(self) -> "PreprovisionConfigModel": """Validate that discovery_username and discovery_password are both set or both absent.""" - has_user = bool(self.discovery_username) - has_pass = bool(self.discovery_password) - if has_user and not has_pass: - raise ValueError("discovery_password must be set when discovery_username is specified") - 
if has_pass and not has_user: - raise ValueError("discovery_username must be set when discovery_password is specified") + SwitchValidators.check_discovery_credentials_pair(self.discovery_username, self.discovery_password) return self @field_validator("serial_number", mode="before") @classmethod def validate_serial_number_field(cls, v: str) -> str: """Validate serial_number is not empty.""" - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("serial_number cannot be empty") - return result + return SwitchValidators.require_serial_number(v) class RMAConfigModel(NDNestedModel): @@ -246,20 +242,12 @@ class RMAConfigModel(NDNestedModel): @classmethod def validate_serial_numbers(cls, v: str) -> str: """Validate new_serial_number is not empty.""" - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("Serial number cannot be empty") - return result + return SwitchValidators.require_serial_number(v, "new_serial_number") @model_validator(mode="after") def validate_discovery_credentials_pair(self) -> "RMAConfigModel": """Validate that discovery_username and discovery_password are both set or both absent.""" - has_user = bool(self.discovery_username) - has_pass = bool(self.discovery_password) - if has_user and not has_pass: - raise ValueError("discovery_password must be set when discovery_username is specified") - if has_pass and not has_user: - raise ValueError("discovery_username must be set when discovery_password is specified") + SwitchValidators.check_discovery_credentials_pair(self.discovery_username, self.discovery_password) return self diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py index 6cf95066..7091fa21 100644 --- a/plugins/module_utils/models/manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -131,26 +131,17 @@ class 
SwitchDiscoveryModel(NDBaseModel): @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: - result = SwitchValidators.validate_hostname(v) - if result is None: - raise ValueError("hostname cannot be empty") - return result + return SwitchValidators.require_hostname(v) @field_validator("ip", mode="before") @classmethod def validate_ip(cls, v: str) -> str: - result = SwitchValidators.validate_ip_address(v) - if result is None: - raise ValueError("ip cannot be empty") - return result + return SwitchValidators.require_ip_address(v) @field_validator("serial_number", mode="before") @classmethod def validate_serial(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("serial_number cannot be empty") - return result + return SwitchValidators.require_serial_number(v) @field_validator("vdc_mac", mode="before") @classmethod diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py index ce5f4aae..435a1611 100644 --- a/plugins/module_utils/models/manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -13,7 +13,6 @@ __metaclass__ = type -from ipaddress import ip_network from typing import Any, Dict, List, Optional, ClassVar, Literal from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( @@ -141,39 +140,25 @@ class PreProvisionSwitchModel(NDBaseModel): @field_validator("ip", "dhcp_bootstrap_ip", mode="before") @classmethod def validate_ip(cls, v: Optional[str]) -> Optional[str]: - if v is None: - return None - result = SwitchValidators.validate_ip_address(v) - if result is None: - raise ValueError(f"Invalid IP address: {v}") - return result + return SwitchValidators.validate_ip_address(v) @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: - result = 
SwitchValidators.validate_hostname(v) - if result is None: - raise ValueError("hostname cannot be empty") - return result + return SwitchValidators.require_hostname(v) @field_validator("serial_number", mode="before") @classmethod def validate_serial(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("serial_number cannot be empty") - return result + return SwitchValidators.require_serial_number(v) @field_validator("gateway_ip_mask", mode="before") @classmethod def validate_gateway(cls, v: str) -> str: - if not v or "/" not in v: + result = SwitchValidators.validate_cidr(v) + if result is None: raise ValueError("gatewayIpMask must include subnet mask (e.g., 10.23.244.1/24)") - try: - ip_network(v, strict=False) - except Exception as exc: - raise ValueError(f"Invalid gatewayIpMask: {v}") from exc - return v + return result @computed_field(alias="useNewCredentials") @property diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py index 08ab8e06..40b9f843 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -83,38 +83,27 @@ class RMASwitchModel(NDBaseModel): @field_validator("gateway_ip_mask", mode="before") @classmethod def validate_gateway(cls, v: Optional[str]) -> Optional[str]: - if v is None: - return None - result = SwitchValidators.validate_cidr(v) - if result is None: - raise ValueError("gateway_ip_mask is not a valid CIDR") - return result + return SwitchValidators.validate_cidr_optional(v) @field_validator("hostname", mode="before") @classmethod def validate_host(cls, v: str) -> str: - result = SwitchValidators.validate_hostname(v) - if result is None: - raise ValueError("hostname cannot be empty") - return result + return SwitchValidators.require_hostname(v) @field_validator("ip", "dhcp_bootstrap_ip", mode="before") @classmethod def 
validate_ip(cls, v: Optional[str]) -> Optional[str]: - if v is None: - return None - result = SwitchValidators.validate_ip_address(v) - if v is not None and result is None: - raise ValueError(f"Invalid IP address: {v}") - return result + return SwitchValidators.validate_ip_address(v) @field_validator("new_switch_id", mode="before") @classmethod def validate_serial(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("new_switch_id cannot be empty") - return result + return SwitchValidators.require_serial_number(v, "new_switch_id") + + @field_validator("old_switch_id", mode="before") + @classmethod + def validate_old_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v, "old_switch_id") @computed_field(alias="useNewCredentials") @property diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index 80dbe70a..689f04aa 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -57,10 +57,7 @@ class TelemetryIpCollection(NDNestedModel): description="Out of band IPv6 address", ) - @field_validator("inband_ipv4_address", "out_of_band_ipv4_address", mode="before") - @classmethod - def validate_ipv4(cls, v: Optional[str]) -> Optional[str]: - return SwitchValidators.validate_ip_address(v) + class VpcData(NDNestedModel): @@ -89,10 +86,7 @@ class VpcData(NDNestedModel): @field_validator("peer_switch_id", mode="before") @classmethod def validate_peer_serial(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("peer_switch_id cannot be empty") - return result + return SwitchValidators.require_serial_number(v, "peer_switch_id") class SwitchMetadata(NDNestedModel): @@ -267,10 +261,7 @@ def parse_additional_data(cls, v: Any) -> Any: 
@field_validator("switch_id", mode="before") @classmethod def validate_switch_id(cls, v: str) -> str: - result = SwitchValidators.validate_serial_number(v) - if result is None: - raise ValueError("switch_id cannot be empty") - return result + return SwitchValidators.require_serial_number(v, "switch_id") @field_validator("fabric_management_ip", mode="before") @classmethod diff --git a/plugins/module_utils/models/manage_switches/validators.py b/plugins/module_utils/models/manage_switches/validators.py index 626b4865..5c316042 100644 --- a/plugins/module_utils/models/manage_switches/validators.py +++ b/plugins/module_utils/models/manage_switches/validators.py @@ -18,8 +18,24 @@ class SwitchValidators: """ Common validators for switch-related fields. + + The ``validate_*`` static methods are safe to call from Pydantic + ``@field_validator`` bodies. They return ``None`` when the value is + absent and raise ``ValueError`` on bad input. + + The ``require_*`` helpers are convenience wrappers that additionally + raise ``ValueError`` when the result is ``None`` (i.e. the field was + empty after stripping). Use them in place of the repetitive + ``result = …; if result is None: raise …`` pattern. + + ``check_discovery_credentials_pair`` is a shared ``@model_validator`` + helper that enforces the mutual-presence rule for discovery credentials. 
""" + # ------------------------------------------------------------------ + # Low-level nullable validators (return None when absent) + # ------------------------------------------------------------------ + @staticmethod def validate_ip_address(v: Optional[str]) -> Optional[str]: """Validate IPv4 or IPv6 address.""" @@ -104,6 +120,110 @@ def validate_vpc_domain(v: Optional[int]) -> Optional[int]: raise ValueError(f"VPC domain must be between 1 and 1000: {v}") return v + # ------------------------------------------------------------------ + # Required-field helpers (raise ValueError when value is absent) + # ------------------------------------------------------------------ + + @staticmethod + def require_serial_number(v: str, field_name: str = "serial_number") -> str: + """Validate and require a non-empty serial number. + + Delegates to ``validate_serial_number`` and raises ``ValueError`` + when the result is ``None`` (empty after stripping). + + Args: + v: Raw serial number value from Pydantic. + field_name: Field name used in the error message. + + Returns: + Validated serial number string. + + Raises: + ValueError: When the value is empty or contains invalid characters. + """ + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError(f"{field_name} cannot be empty") + return result + + @staticmethod + def require_hostname(v: str) -> str: + """Validate and require a non-empty hostname. + + Args: + v: Raw hostname value from Pydantic. + + Returns: + Validated hostname string. + + Raises: + ValueError: When the value is empty or fails RFC 1123 checks. + """ + result = SwitchValidators.validate_hostname(v) + if result is None: + raise ValueError("hostname cannot be empty") + return result + + @staticmethod + def require_ip_address(v: str) -> str: + """Validate and require a non-empty IP address. + + Args: + v: Raw IP address value from Pydantic. + + Returns: + Validated IP address string. 
+ + Raises: + ValueError: When the value is empty or not a valid IPv4/v6 address. + """ + result = SwitchValidators.validate_ip_address(v) + if result is None: + raise ValueError(f"Invalid IP address: {v}") + return result + + @staticmethod + def validate_cidr_optional(v: Optional[str]) -> Optional[str]: + """Validate an optional CIDR string; pass through ``None`` unchanged. + + Args: + v: Raw CIDR value or ``None``. + + Returns: + Validated CIDR string, or ``None``. + + Raises: + ValueError: When the value is present but not valid CIDR notation. + """ + if v is None: + return None + result = SwitchValidators.validate_cidr(v) + if result is None: + raise ValueError(f"Invalid CIDR notation: {v}") + return result + + @staticmethod + def check_discovery_credentials_pair(username: Optional[str], password: Optional[str]) -> None: + """Enforce mutual-presence of discovery credentials. + + Both ``discovery_username`` and ``discovery_password`` must either be + absent together or present together. Call from a ``@model_validator`` + body to avoid duplicating the same four-line check across every model. + + Args: + username: discovery_username value (may be ``None``). + password: discovery_password value (may be ``None``). + + Raises: + ValueError: When exactly one of the two is provided. 
+ """ + has_user = bool(username) + has_pass = bool(password) + if has_user and not has_pass: + raise ValueError("discovery_password must be set when discovery_username is specified") + if has_pass and not has_user: + raise ValueError("discovery_username must be set when discovery_password is specified") + __all__ = [ "SwitchValidators", From a36bc5161ed8c43dd4f1a1f625ef97e5014b9331 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 2 Apr 2026 01:03:57 +0530 Subject: [PATCH 090/109] Black + Sanity Fixes --- .../manage_switches/nd_switch_resources.py | 135 ++++++++++-------- .../manage_switches/switch_data_models.py | 2 - 2 files changed, 74 insertions(+), 63 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 7512817d..f8d348ce 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -314,12 +314,8 @@ def compute_changes( len(existing), ) - existing_by_ip: Dict[str, SwitchDataModel] = { - sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip - } - existing_by_id: Dict[str, SwitchDataModel] = { - sw.switch_id: sw for sw in existing if sw.switch_id - } + existing_by_ip: Dict[str, SwitchDataModel] = {sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip} + existing_by_id: Dict[str, SwitchDataModel] = {sw.switch_id: sw for sw in existing if sw.switch_id} # Fields compared for normal switches compare_fields = { @@ -387,7 +383,9 @@ def compute_changes( if serial_match and role_match: log.info( "Bootstrap %s serial=%s role=%s — idempotent, skipping", - cfg.seed_ip, serial, cfg.role, + cfg.seed_ip, + serial, + cfg.role, ) idempotent.append(cfg) continue @@ -395,7 +393,9 @@ def compute_changes( status = existing_sw.additional_data.discovery_status if existing_sw.additional_data else None log.info( "Bootstrap %s differs (serial_match=%s, role_match=%s, 
status=%s) — deleting existing", - cfg.seed_ip, serial_match, role_match, + cfg.seed_ip, + serial_match, + role_match, getattr(status, "value", status) if status else "unknown", ) to_delete_existing.append(existing_sw) @@ -424,7 +424,9 @@ def compute_changes( if serial_match and role_match: log.info( "Preprovision %s serial=%s role=%s — idempotent, skipping", - cfg.seed_ip, serial, cfg.role, + cfg.seed_ip, + serial, + cfg.role, ) idempotent.append(cfg) continue @@ -432,7 +434,9 @@ def compute_changes( status = existing_sw.additional_data.discovery_status if existing_sw.additional_data else None log.info( "Preprovision %s differs (serial_match=%s, role_match=%s, status=%s) — deleting existing", - cfg.seed_ip, serial_match, role_match, + cfg.seed_ip, + serial_match, + role_match, getattr(status, "value", status) if status else "unknown", ) to_delete_existing.append(existing_sw) @@ -483,7 +487,9 @@ def compute_changes( else: log.info( "Normal %s: role mismatch (config=%s, existing=%s) — marking to_update", - cfg.seed_ip, cfg.role, existing_sw.switch_role, + cfg.seed_ip, + cfg.role, + existing_sw.switch_role, ) to_update.append(cfg) continue @@ -495,7 +501,8 @@ def compute_changes( if sw.switch_id and sw.switch_id not in accounted_ids and sw.fabric_management_ip not in poap_ips: log.info( "Existing %s (%s) has no config entry — marking to_delete", - sw.fabric_management_ip, sw.switch_id, + sw.fabric_management_ip, + sw.switch_id, ) to_delete.append(sw) @@ -516,9 +523,16 @@ def compute_changes( log.info( "compute_changes: to_add=%s, to_update=%s, to_delete=%s, migration=%s, " "idempotent=%s, bootstrap=%s, normal_readd=%s, preprov=%s, swap=%s, rma=%s", - len(plan.to_add), len(plan.to_update), len(plan.to_delete), len(plan.migration_mode), - len(plan.idempotent), len(plan.to_bootstrap), len(plan.normal_readd), - len(plan.to_preprovision), len(plan.to_swap), len(plan.to_rma), + len(plan.to_add), + len(plan.to_update), + len(plan.to_delete), + len(plan.migration_mode), + 
len(plan.idempotent), + len(plan.to_bootstrap), + len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), + len(plan.to_rma), ) log.debug("EXIT: compute_changes()") return plan @@ -2191,9 +2205,7 @@ def _validate_prerequisites( log.debug("ENTER: _validate_prerequisites()") - existing_by_ip: Dict[str, SwitchDataModel] = { - sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip - } + existing_by_ip: Dict[str, SwitchDataModel] = {sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip} result: Dict[str, Dict[str, Any]] = {} @@ -2212,12 +2224,7 @@ def _validate_prerequisites( old_serial = old_switch.serial_number or old_switch.switch_id if not old_serial: - nd.module.fail_json( - msg=( - f"RMA: Switch at '{seed_ip}' has no serial number in " - f"the inventory response." - ) - ) + nd.module.fail_json(msg=(f"RMA: Switch at '{seed_ip}' has no serial number in " f"the inventory response.")) ad = old_switch.additional_data if ad is None: @@ -2321,11 +2328,7 @@ def _build_rma_model( oldSwitchId=old_serial, publicKey=public_key, fingerPrint=finger_print, - data=( - {"gatewayIpMask": gateway_ip_mask, "models": data_models} - if (gateway_ip_mask or data_models) - else None - ), + data=({"gatewayIpMask": gateway_ip_mask, "models": data_models} if (gateway_ip_mask or data_models) else None), ) log.debug("EXIT: _build_rma_model() -> newSwitchId=%s, oldSwitchId=%s", rma_model.new_switch_id, old_serial) @@ -2639,11 +2642,7 @@ def _handle_merged_state( if plan.to_update: ips = [cfg.seed_ip for cfg in plan.to_update] self.nd.module.fail_json( - msg=( - f"Switches require updates not supported in merged state. " - f"Use 'overridden' state for in-place updates. " - f"Affected switches: {ips}" - ) + msg=(f"Switches require updates not supported in merged state. " f"Use 'overridden' state for in-place updates. 
" f"Affected switches: {ips}") ) # Check whether any idempotent switch (normal or POAP) is out of @@ -2668,9 +2667,14 @@ def _handle_merged_state( break has_work = bool( - plan.to_add or plan.migration_mode or plan.to_bootstrap - or plan.normal_readd or plan.to_preprovision or plan.to_swap - or plan.to_rma or idempotent_save_req + plan.to_add + or plan.migration_mode + or plan.to_bootstrap + or plan.normal_readd + or plan.to_preprovision + or plan.to_swap + or plan.to_rma + or idempotent_save_req ) if not has_work: self.log.info("merged: nothing to do — all switches idempotent") @@ -2679,11 +2683,15 @@ def _handle_merged_state( # Check mode if self.nd.module.check_mode: self.log.info( - "Check mode: add=%s, migrate=%s, bootstrap=%s, " - "readd=%s, preprov=%s, swap=%s, rma=%s, save_deploy=%s", - len(plan.to_add), len(plan.migration_mode), len(plan.to_bootstrap), - len(plan.normal_readd), len(plan.to_preprovision), len(plan.to_swap), - len(plan.to_rma), idempotent_save_req, + "Check mode: add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s, rma=%s, save_deploy=%s", + len(plan.to_add), + len(plan.migration_mode), + len(plan.to_bootstrap), + len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), + len(plan.to_rma), + idempotent_save_req, ) self.results.action = "merge" self.results.state = self.state @@ -2713,11 +2721,7 @@ def _handle_merged_state( for group_key, group_switches in credential_groups.items(): username, _pw_hash, auth_proto, platform_type, preserve_config = group_key password = group_switches[0].password - pairs = [ - (cfg, discovered_data[cfg.seed_ip]) - for cfg in group_switches - if cfg.seed_ip in discovered_data - ] + pairs = [(cfg, discovered_data[cfg.seed_ip]) for cfg in group_switches if cfg.seed_ip in discovered_data] if not pairs: self.log.warning( "No discovery data for group %s — skipping bulk_add", @@ -2794,8 +2798,14 @@ def _handle_overridden_state( self.log.info("Handling overridden state") has_work = bool( - 
plan.to_add or plan.to_update or plan.to_delete or plan.migration_mode - or plan.to_bootstrap or plan.normal_readd or plan.to_preprovision or plan.to_swap + plan.to_add + or plan.to_update + or plan.to_delete + or plan.migration_mode + or plan.to_bootstrap + or plan.normal_readd + or plan.to_preprovision + or plan.to_swap ) if not has_work and not self.proposed: self.log.info("overridden: nothing to do") @@ -2804,11 +2814,15 @@ def _handle_overridden_state( # Check mode if self.nd.module.check_mode: self.log.info( - "Check mode: delete_orphans=%s, update=%s, add=%s, migrate=%s, " - "bootstrap=%s, readd=%s, preprov=%s, swap=%s", - len(plan.to_delete), len(plan.to_update), len(plan.to_add), - len(plan.migration_mode), len(plan.to_bootstrap), len(plan.normal_readd), - len(plan.to_preprovision), len(plan.to_swap), + "Check mode: delete_orphans=%s, update=%s, add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s", + len(plan.to_delete), + len(plan.to_update), + len(plan.to_add), + len(plan.migration_mode), + len(plan.to_bootstrap), + len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), ) self.results.action = "override" self.results.state = self.state @@ -2885,11 +2899,7 @@ def _handle_overridden_state( for group_key, group_switches in credential_groups.items(): username, _pw_hash, auth_proto, platform_type, preserve_config = group_key password = group_switches[0].password - pairs = [ - (cfg, discovered_data[cfg.seed_ip]) - for cfg in group_switches - if cfg.seed_ip in discovered_data - ] + pairs = [(cfg, discovered_data[cfg.seed_ip]) for cfg in group_switches if cfg.seed_ip in discovered_data] if not pairs: self.log.warning( "No discovery data for group %s — skipping", @@ -3007,12 +3017,15 @@ def _handle_deleted_state( if cfg.role is not None and cfg.role != existing_sw.switch_role: self.log.info( "deleted: switch %s role mismatch (config=%s, fabric=%s) — skipping", - cfg.seed_ip, cfg.role, existing_sw.switch_role, + cfg.seed_ip, + 
cfg.role, + existing_sw.switch_role, ) continue self.log.info( "deleted: marking %s (%s) for deletion", - cfg.seed_ip, existing_sw.switch_id, + cfg.seed_ip, + existing_sw.switch_id, ) switches_to_delete.append(existing_sw) self._log_operation("delete", cfg.seed_ip) diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index 689f04aa..cd8dd030 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -58,8 +58,6 @@ class TelemetryIpCollection(NDNestedModel): ) - - class VpcData(NDNestedModel): """ vPC pair configuration and operational status for a switch. From 78604ab63dbf991d0f0f0c31ee5a60b566b42195 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 2 Apr 2026 14:49:13 +0530 Subject: [PATCH 091/109] Overridden fixes for Normal Switches, POAP/Preprovision --- .../manage_switches/nd_switch_resources.py | 192 +++++++++--------- .../models/manage_switches/config_models.py | 6 +- 2 files changed, 103 insertions(+), 95 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index f8d348ce..30171e22 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -281,9 +281,11 @@ def compute_changes( Idempotency rules by operation type: - * **normal** — compare ``seed_ip``, ``serial_number`` (via discovery), - ``hostname``, ``model``, ``software_version``, and ``role`` against - the existing inventory. + * **normal** — compare ``role`` against the existing inventory entry + found by ``seed_ip``. Role is the only user-specifiable field for + normal switches; hostname, model, and software version are not + user-supplied and are not compared. No discovery is performed for + switches already in the fabric. 
* **poap / preprovision** — compare ``seed_ip``, ``serial_number`` (from ``poap.serial_number`` / ``preprovision.serial_number``), and ``role`` against the existing inventory. If all three match the @@ -317,17 +319,6 @@ def compute_changes( existing_by_ip: Dict[str, SwitchDataModel] = {sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip} existing_by_id: Dict[str, SwitchDataModel] = {sw.switch_id: sw for sw in existing if sw.switch_id} - # Fields compared for normal switches - compare_fields = { - "switch_id", - "serial_number", - "fabric_management_ip", - "hostname", - "model", - "software_version", - "switch_role", - } - # Output buckets to_add: List[SwitchConfigModel] = [] to_update: List[SwitchConfigModel] = [] @@ -412,31 +403,50 @@ def compute_changes( # ------------------------------------------------------------------ if op == "preprovision": poap_ips.add(cfg.seed_ip) - serial = cfg.preprovision.serial_number if cfg.preprovision else None + pp = cfg.preprovision + serial = pp.serial_number if pp else None if not existing_sw: log.info("Preprovision %s: not in fabric — queue for preprovision", cfg.seed_ip) to_preprovision.append(cfg) continue - serial_match = serial and serial in (existing_sw.serial_number, existing_sw.switch_id) + serial_match = bool(serial and serial in (existing_sw.serial_number, existing_sw.switch_id)) role_match = cfg.role is None or cfg.role == existing_sw.switch_role - if serial_match and role_match: + model_match = pp is None or pp.model is None or pp.model == existing_sw.model + version_match = pp is None or pp.version is None or pp.version == existing_sw.software_version + hostname_match = pp is None or pp.hostname is None or pp.hostname == existing_sw.hostname + + if serial_match and role_match and model_match and version_match and hostname_match: log.info( - "Preprovision %s serial=%s role=%s — idempotent, skipping", + "Preprovision %s serial=%s role=%s model=%s version=%s hostname=%s — idempotent, skipping", 
cfg.seed_ip, serial, cfg.role, + pp.model if pp else None, + pp.version if pp else None, + pp.hostname if pp else None, ) idempotent.append(cfg) continue + diffs = [] + if not serial_match: + diffs.append(f"serial(config={serial}, fabric={existing_sw.serial_number})") + if not role_match: + diffs.append(f"role(config={cfg.role}, fabric={existing_sw.switch_role})") + if not model_match: + diffs.append(f"model(config={pp.model if pp else None}, fabric={existing_sw.model})") + if not version_match: + diffs.append(f"version(config={pp.version if pp else None}, fabric={existing_sw.software_version})") + if not hostname_match: + diffs.append(f"hostname(config={pp.hostname if pp else None}, fabric={existing_sw.hostname})") + status = existing_sw.additional_data.discovery_status if existing_sw.additional_data else None log.info( - "Preprovision %s differs (serial_match=%s, role_match=%s, status=%s) — deleting existing", + "Preprovision %s differs [%s] (status=%s) — deleting existing", cfg.seed_ip, - serial_match, - role_match, + ", ".join(diffs), getattr(status, "value", status) if status else "unknown", ) to_delete_existing.append(existing_sw) @@ -451,15 +461,6 @@ def compute_changes( # ------------------------------------------------------------------ # Normal switch # ------------------------------------------------------------------ - # Note: serial/id comparison happens after discovery via build_proposed; - # here we rely on the SwitchDataModel that build_proposed will produce - # being present in existing. Since this function receives SwitchConfigModel - # objects (not yet resolved to SwitchDataModel), normal-switch idempotency - # is done after discover() + build_proposed() by comparing the resulting - # SwitchDataModel against existing using compare_fields. - # - # The code below handles the case where the switch is *already* in the - # fabric (no discovery needed) and can be evaluated immediately. 
if op == "normal": if not existing_sw: log.info("Normal %s: not in fabric — queue for discovery + add", cfg.seed_ip) @@ -471,19 +472,13 @@ def compute_changes( migration_mode.append(cfg) continue - # Build a lightweight comparison dict from config vs existing - # for fields we can evaluate without discovery data. + # Role is the only user-specifiable field for a normal switch. + # hostname, model, and software_version are device-reported and + # not part of desired config — no discovery needed. role_match = cfg.role is None or cfg.role == existing_sw.switch_role - # IP always matches (looked up by IP), so only role matters - # for an already-in-fabric switch; other fields (model, version, - # hostname) are only verifiable after discovery. if role_match: - log.info("Normal %s: in fabric, role matches — checking field diff after build_proposed", cfg.seed_ip) - # Defer final diff to after build_proposed; treat as to_add - # so the caller runs discovery and build_proposed, then sees - # the switch in to_update/idempotent from a second pass. - # For now simply indicate "needs evaluation" by placing in to_add. - to_add.append(cfg) + log.info("Normal %s: in fabric, role matches — idempotent", cfg.seed_ip) + idempotent.append(cfg) else: log.info( "Normal %s: role mismatch (config=%s, existing=%s) — marking to_update", @@ -1417,38 +1412,9 @@ def handle( results.register_api_call() return - # Idempotency: skip entries whose target serial is already in the fabric. - # Build lookup structures for idempotency checks. - # Bootstrap: idempotent when both IP address AND serial number match. - # PreProvision: idempotent when IP address alone matches. 
- existing_by_ip = {sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip} - - active_bootstrap = [] - for switch_cfg, poap_cfg in bootstrap_entries: - existing_sw = existing_by_ip.get(switch_cfg.seed_ip) - if existing_sw and poap_cfg.serial_number in ( - existing_sw.serial_number, - existing_sw.switch_id, - ): - log.info( - "Bootstrap: IP '%s' with serial '%s' already in fabric — idempotent, skipping", - switch_cfg.seed_ip, - poap_cfg.serial_number, - ) - else: - active_bootstrap.append((switch_cfg, poap_cfg)) - bootstrap_entries = active_bootstrap - - active_preprov = [] - for switch_cfg, preprov_cfg in preprov_entries: - if switch_cfg.seed_ip in existing_by_ip: - log.info( - "PreProvision: IP '%s' already in fabric — idempotent, skipping", - switch_cfg.seed_ip, - ) - else: - active_preprov.append((switch_cfg, preprov_cfg)) - preprov_entries = active_preprov + # Idempotency is handled entirely by compute_changes before entries + # reach this handler. Everything in bootstrap_entries / preprov_entries + # has already been classified as needing action — no re-checking here. # Handle swap entries (change serial number on pre-provisioned switches) if swap_entries: @@ -2615,6 +2581,40 @@ def manage_state(self) -> None: # State Handlers (orchestration only — delegate to services) # ===================================================================== + def _check_idempotent_sync( + self, + plan: "SwitchPlan", + existing_by_ip: Dict[str, "SwitchDataModel"], + ) -> bool: + """Return True if any non-preprovision idempotent switch is out of config-sync. + + Pre-provisioned switches are placeholder entries that are never + in-sync by design and are excluded from this check. Only relevant + when deploy is enabled; returns False immediately otherwise. + + Args: + plan: Action plan from :meth:`SwitchDiffEngine.compute_changes`. + existing_by_ip: Existing switches keyed by fabric management IP. 
+ + Returns: + True if finalize should run for idempotent switches, False otherwise. + """ + if not self.ctx.deploy_config: + return False + for cfg in plan.idempotent: + if cfg.operation_type == "preprovision": + continue + sw = existing_by_ip.get(cfg.seed_ip) + status = sw.additional_data.config_sync_status if sw and sw.additional_data else None + if status != ConfigSyncStatus.IN_SYNC: + self.log.info( + "Switch %s is idempotent but configSyncStatus='%s' — will finalize", + cfg.seed_ip, + getattr(status, "value", status) if status else "unknown", + ) + return True + return False + def _handle_merged_state( self, plan: "SwitchPlan", @@ -2642,7 +2642,22 @@ def _handle_merged_state( if plan.to_update: ips = [cfg.seed_ip for cfg in plan.to_update] self.nd.module.fail_json( - msg=(f"Switches require updates not supported in merged state. " f"Use 'overridden' state for in-place updates. " f"Affected switches: {ips}") + msg=(f"Switches require role updates not supported in merged state. " + f"Use 'overridden' state for in-place updates. " + f"Affected switches: {ips}") + ) + + # Fail if any POAP/preprovision switches already in fabric differ on + # one or more of: serial, role, model, version, hostname — + # delete+re-provision is destructive and only permitted in overridden state. + if plan.to_delete_existing: + ips = [sw.fabric_management_ip for sw in plan.to_delete_existing] + self.nd.module.fail_json( + msg=(f"POAP/preprovision switches already in fabric have a " + f"field mismatch (serial, role, model, version, or hostname) " + f"and require delete + re-provision. " + f"Use 'overridden' state to apply this change. " + f"Affected switches: {ips}") ) # Check whether any idempotent switch (normal or POAP) is out of @@ -2650,21 +2665,7 @@ def _handle_merged_state( # Pre-provisioned switches are placeholder entries that are never # in-sync by design, so they are excluded from this check. Only relevant when deploy is enabled. 
existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} - idempotent_save_req = False - if self.ctx.deploy_config: - for cfg in plan.idempotent: - if cfg.operation_type == "preprovision": - continue - sw = existing_by_ip.get(cfg.seed_ip) - status = sw.additional_data.config_sync_status if sw and sw.additional_data else None - if status != ConfigSyncStatus.IN_SYNC: - self.log.info( - "Switch %s is idempotent but configSyncStatus='%s' — will finalize", - cfg.seed_ip, - getattr(status, "value", status) if status else "unknown", - ) - idempotent_save_req = True - break + idempotent_save_req = self._check_idempotent_sync(plan, existing_by_ip) has_work = bool( plan.to_add @@ -2797,6 +2798,9 @@ def _handle_overridden_state( self.log.debug("ENTER: _handle_overridden_state()") self.log.info("Handling overridden state") + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} + idempotent_save_req = self._check_idempotent_sync(plan, existing_by_ip) + has_work = bool( plan.to_add or plan.to_update @@ -2806,6 +2810,7 @@ def _handle_overridden_state( or plan.normal_readd or plan.to_preprovision or plan.to_swap + or idempotent_save_req ) if not has_work and not self.proposed: self.log.info("overridden: nothing to do") @@ -2814,7 +2819,7 @@ def _handle_overridden_state( # Check mode if self.nd.module.check_mode: self.log.info( - "Check mode: delete_orphans=%s, update=%s, add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s", + "Check mode: delete_orphans=%s, update=%s, add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s, save_deploy=%s", len(plan.to_delete), len(plan.to_update), len(plan.to_add), @@ -2823,6 +2828,7 @@ def _handle_overridden_state( len(plan.normal_readd), len(plan.to_preprovision), len(plan.to_swap), + idempotent_save_req, ) self.results.action = "override" self.results.state = self.state @@ -2838,12 +2844,11 @@ def _handle_overridden_state( "normal_readd": len(plan.normal_readd), "preprovision": 
len(plan.to_preprovision), "swap": len(plan.to_swap), + "save_deploy_required": idempotent_save_req, } self.results.register_api_call() return - existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} - # --- Phase 1: Combined delete ------------------------------------------- # Merge three sources of deletions into one bulk_delete call: # a) Orphans (in fabric, not in any config) @@ -2935,6 +2940,9 @@ def _handle_overridden_state( all_preserve_config=all_preserve_config, update_roles=have_migration, ) + elif idempotent_save_req: + self.log.info("No adds/migrations but config-sync required — running finalize") + self.fabric_ops.finalize() # --- Phase 4: POAP workflows (bootstrap / preprovision / swap) ---------- # plan.to_delete_existing was deleted in Phase 1. diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 0b0edf22..f00a4370 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -434,9 +434,9 @@ def apply_state_defaults(self, info: ValidationInfo) -> "SwitchConfigModel": """ state = (info.context or {}).get("state") if info else None - # POAP/Pre-provision/Swap only allowed with merged - if (self.poap or self.preprovision) and state not in (None, "merged"): - raise ValueError(f"POAP/Pre-provision operations require 'merged' state, " f"got '{state}' (switch: {self.seed_ip})") + # POAP/Pre-provision/Swap allowed with merged or overridden + if (self.poap or self.preprovision) and state not in (None, "merged", "overridden"): + raise ValueError(f"POAP/Pre-provision operations require 'merged' or 'overridden' state, " f"got '{state}' (switch: {self.seed_ip})") # RMA only allowed with merged if self.rma and state not in (None, "merged"): From 9adee563c1f26f7c2a7e357b952ef553a5039463 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 2 Apr 2026 14:57:12 +0530 Subject: [PATCH 
092/109] Black Fix + Sanity --- .../manage_switches/nd_switch_resources.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 30171e22..5e05c684 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2642,9 +2642,11 @@ def _handle_merged_state( if plan.to_update: ips = [cfg.seed_ip for cfg in plan.to_update] self.nd.module.fail_json( - msg=(f"Switches require role updates not supported in merged state. " - f"Use 'overridden' state for in-place updates. " - f"Affected switches: {ips}") + msg=( + f"Switches require role updates not supported in merged state. " + f"Use 'overridden' state for in-place updates. " + f"Affected switches: {ips}" + ) ) # Fail if any POAP/preprovision switches already in fabric differ on @@ -2653,11 +2655,13 @@ def _handle_merged_state( if plan.to_delete_existing: ips = [sw.fabric_management_ip for sw in plan.to_delete_existing] self.nd.module.fail_json( - msg=(f"POAP/preprovision switches already in fabric have a " - f"field mismatch (serial, role, model, version, or hostname) " - f"and require delete + re-provision. " - f"Use 'overridden' state to apply this change. " - f"Affected switches: {ips}") + msg=( + f"POAP/preprovision switches already in fabric have a " + f"field mismatch (serial, role, model, version, or hostname) " + f"and require delete + re-provision. " + f"Use 'overridden' state to apply this change. 
" + f"Affected switches: {ips}" + ) ) # Check whether any idempotent switch (normal or POAP) is out of From 932a18682c766b60a50d830f055be388247a33b1 Mon Sep 17 00:00:00 2001 From: Matt Tarkington Date: Thu, 2 Apr 2026 14:25:20 -0400 Subject: [PATCH 093/109] temp update to pr branch list for ci --- .github/workflows/ansible-test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index 9b1d87e0..e98fd030 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -9,6 +9,7 @@ on: branches: - develop - main + - nd42_integration # schedule: # # * is a special character in YAML so you have to quote this string # - cron: '0 6 * * *' From ddb3bb8bf8413c7c18832c41130e7fd00833ffc8 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Sun, 5 Apr 2026 20:38:42 +0530 Subject: [PATCH 094/109] State Fix + Config Output Changes --- .../manage_switches/nd_switch_resources.py | 80 ++++++++++++++++++- .../models/manage_switches/config_models.py | 5 +- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 5e05c684..d581f10b 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2418,6 +2418,8 @@ def __init__( ) self.before: NDConfigCollection = self.existing.copy() self.sent: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) + self.sent_adds: List[SwitchConfigModel] = [] + self.proposed_cfgs: List[SwitchConfigModel] = [] except Exception as e: msg = f"Failed to query fabric '{self.fabric}' inventory " f"during initialization: {e}" log.error(msg) @@ -2440,6 +2442,40 @@ def __init__( log.info("Initialized NDSwitchResourceModule for fabric: %s", self.fabric) + def _inventory_to_config_list(self, collection: "NDConfigCollection") -> List[Dict[str, Any]]: 
+ """Convert an inventory collection (SwitchDataModel) to gathered-format config dicts. + + Produces the same shape as gathered state output: seed_ip, role, auth_proto, + preserve_config, username/password placeholders. Built directly from + SwitchDataModel fields to avoid re-running Pydantic validators. + """ + result = [] + for sw in collection: + if not sw.fabric_management_ip: + continue + role = sw.switch_role + result.append({ + "seed_ip": sw.fabric_management_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "auth_proto": "MD5", + "preserve_config": False, + "username": "", + "password": "", + }) + return result + + def _proposed_to_config_list(self, configs: List["SwitchConfigModel"]) -> List[Dict[str, Any]]: + """Serialize proposed configs for output, stripping internal fields and masking passwords.""" + result = [] + for cfg in configs: + try: + entry = cfg.to_config(exclude={"platform_type": True, "operation_type": True}) + entry["password"] = "" + result.append(entry) + except Exception as exc: + self.log.warning("Could not convert config %s for output: %s", cfg.seed_ip, exc) + return result + def exit_json(self) -> None: """Finalize collected results and exit the Ansible module. 
@@ -2473,8 +2509,40 @@ def exit_json(self) -> None: response_data=self._query_all_switches(), model_class=SwitchDataModel, ) - self.output.assign(after=self.existing, diff=self.sent) - final.update(self.output.format()) + # Build diff: deletes (from self.sent) + adds (from self.sent_adds) + diff_list: List[Dict[str, Any]] = [] + for sw in self.sent: + if not sw.fabric_management_ip: + continue + role = sw.switch_role + entry = { + "seed_ip": sw.fabric_management_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "auth_proto": "MD5", + "preserve_config": False, + "username": "", + "password": "", + "_action": "deleted", + } + diff_list.append(entry) + for cfg in self.sent_adds: + try: + entry = cfg.to_config(exclude={"platform_type": True, "operation_type": True}) + entry["password"] = "" + entry["_action"] = "added" + diff_list.append(entry) + except Exception as exc: + self.log.warning("Could not convert added config for diff: %s", exc) + output_level = self.module.params.get("output_level", "normal") + fmt_kwargs: Dict[str, Any] = { + "before": self._inventory_to_config_list(self.before), + "after": self._inventory_to_config_list(self.existing), + "diff": diff_list, + } + if output_level in ("info", "debug"): + fmt_kwargs["proposed"] = self._proposed_to_config_list(self.proposed_cfgs) + self.output.assign(before=self.before, after=self.existing) + final.update(self.output.format(**fmt_kwargs)) if True in self.results.failed: self.nd.module.fail_json(**final) @@ -2539,6 +2607,7 @@ def manage_state(self) -> None: for cfg in proposed_config: output_proposed.add(cfg) self.output.assign(proposed=output_proposed) + self.proposed_cfgs = list(proposed_config) # Classify all configs in one pass — idempotency included plan = SwitchDiffEngine.compute_changes(proposed_config, list(self.existing), self.log) @@ -2746,6 +2815,7 @@ def _handle_merged_state( if sn: switch_actions.append((sn, cfg)) self._log_operation("add", cfg.seed_ip) + 
self.sent_adds.append(cfg) # Migration-mode switches — no add needed, but role + finalize applies for cfg in plan.migration_mode: @@ -2753,6 +2823,7 @@ def _handle_merged_state( if sw and sw.switch_id: switch_actions.append((sw.switch_id, cfg)) self._log_operation("migrate", cfg.seed_ip) + self.sent_adds.append(cfg) if switch_actions: all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) @@ -2774,8 +2845,10 @@ def _handle_merged_state( # Only route the pure POAP-workflow configs to the handler. poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap if poap_workflow_configs: + self.sent_adds.extend(poap_workflow_configs) self.poap_handler.handle(poap_workflow_configs, list(self.existing)) if plan.to_rma: + self.sent_adds.extend(plan.to_rma) self.rma_handler.handle(plan.to_rma, list(self.existing)) self.log.debug("EXIT: _handle_merged_state()") @@ -2928,12 +3001,14 @@ def _handle_overridden_state( if sn: switch_actions.append((sn, cfg)) self._log_operation("add", cfg.seed_ip) + self.sent_adds.append(cfg) for cfg in plan.migration_mode: sw = existing_by_ip.get(cfg.seed_ip) if sw and sw.switch_id: switch_actions.append((sw.switch_id, cfg)) self._log_operation("migrate", cfg.seed_ip) + self.sent_adds.append(cfg) if switch_actions: all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) @@ -2953,6 +3028,7 @@ def _handle_overridden_state( # Route pure POAP-workflow configs to the handler. 
poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap if poap_workflow_configs: + self.sent_adds.extend(poap_workflow_configs) self.poap_handler.handle(poap_workflow_configs, list(self.existing)) self.log.debug("EXIT: _handle_overridden_state()") diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index f00a4370..905c52b8 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -434,8 +434,9 @@ def apply_state_defaults(self, info: ValidationInfo) -> "SwitchConfigModel": """ state = (info.context or {}).get("state") if info else None - # POAP/Pre-provision/Swap allowed with merged or overridden - if (self.poap or self.preprovision) and state not in (None, "merged", "overridden"): + # POAP/Pre-provision/Swap allowed with merged, overridden, or deleted + # (deleted ignores the sub-config and only uses seed_ip + role) + if (self.poap or self.preprovision) and state not in (None, "merged", "overridden", "deleted"): raise ValueError(f"POAP/Pre-provision operations require 'merged' or 'overridden' state, " f"got '{state}' (switch: {self.seed_ip})") # RMA only allowed with merged From 8119ba577ceb54eef7613a4eed9e9f402fb3977e Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 6 Apr 2026 01:09:38 +0530 Subject: [PATCH 095/109] Add Replaced State + Tests Restructuring + Deploy Config Interdependence --- .../manage_switches/nd_switch_resources.py | 199 +++++++++++++++++- .../models/manage_switches/config_models.py | 21 +- plugins/modules/nd_manage_switches.py | 11 +- .../nd_manage_switches/files/.gitignore | 0 .../nd_manage_switches/tasks/main.yaml | 2 +- .../tests/{nd => }/deleted.yaml | 3 +- .../tests/{nd => }/gathered.yaml | 0 .../tests/{nd => }/merged.yaml | 6 +- .../tests/{nd => }/overridden.yaml | 4 +- .../tests/{nd => }/poap.yaml | 0 .../nd_manage_switches/tests/replaced.yaml | 
136 ++++++++++++ .../tests/{nd => }/rma.yaml | 0 .../tests/{nd => }/sanity.yaml | 100 +++++++-- 13 files changed, 432 insertions(+), 50 deletions(-) create mode 100644 tests/integration/targets/nd_manage_switches/files/.gitignore rename tests/integration/targets/nd_manage_switches/tests/{nd => }/deleted.yaml (95%) rename tests/integration/targets/nd_manage_switches/tests/{nd => }/gathered.yaml (100%) rename tests/integration/targets/nd_manage_switches/tests/{nd => }/merged.yaml (96%) rename tests/integration/targets/nd_manage_switches/tests/{nd => }/overridden.yaml (95%) rename tests/integration/targets/nd_manage_switches/tests/{nd => }/poap.yaml (100%) create mode 100644 tests/integration/targets/nd_manage_switches/tests/replaced.yaml rename tests/integration/targets/nd_manage_switches/tests/{nd => }/rma.yaml (100%) rename tests/integration/targets/nd_manage_switches/tests/{nd => }/sanity.yaml (64%) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index d581f10b..ecaf64aa 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2427,6 +2427,7 @@ def __init__( # Operation tracking self.nd_logs: List[Dict[str, Any]] = [] + self.msg: str = "" self.output: NDOutput = NDOutput(output_level=self.module.params.get("output_level", "normal")) self.output.assign(before=self.before, after=self.existing) @@ -2469,7 +2470,9 @@ def _proposed_to_config_list(self, configs: List["SwitchConfigModel"]) -> List[D result = [] for cfg in configs: try: - entry = cfg.to_config(exclude={"platform_type": True, "operation_type": True}) + entry = cfg.to_config() + entry.pop("platform_type", None) + entry.pop("operation_type", None) entry["password"] = "" result.append(entry) except Exception as exc: @@ -2527,7 +2530,9 @@ def exit_json(self) -> None: diff_list.append(entry) for cfg in self.sent_adds: try: - entry = 
cfg.to_config(exclude={"platform_type": True, "operation_type": True}) + entry = cfg.to_config() + entry.pop("platform_type", None) + entry.pop("operation_type", None) entry["password"] = "" entry["_action"] = "added" diff_list.append(entry) @@ -2544,6 +2549,8 @@ def exit_json(self) -> None: self.output.assign(before=self.before, after=self.existing) final.update(self.output.format(**fmt_kwargs)) + if self.msg: + final["msg"] = self.msg if True in self.results.failed: self.nd.module.fail_json(**final) self.nd.module.exit_json(**final) @@ -2582,9 +2589,9 @@ def manage_state(self) -> None: proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) if self.config else None return self._handle_deleted_state(proposed_config) - # merged — config required - if self.state == "merged" and not self.config: - self.nd.module.fail_json(msg="'config' is required for 'merged' state.") + # merged / replaced — config required + if self.state in ("merged", "replaced") and not self.config: + self.nd.module.fail_json(msg=f"'config' is required for '{self.state}' state.") # overridden with no/empty config — delete everything if self.state == "overridden" and not self.config: @@ -2599,8 +2606,6 @@ def manage_state(self) -> None: poap_configs = [c for c in proposed_config if c.operation_type in ("poap", "preprovision", "swap")] if rma_configs and self.state != "merged": self.nd.module.fail_json(msg="RMA configs are only supported with state=merged") - if poap_configs and self.state not in ("merged", "overridden"): - self.nd.module.fail_json(msg="POAP and pre-provision configs require state=merged or state=overridden") # Capture all proposed configs for NDOutput output_proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchConfigModel) @@ -2641,6 +2646,8 @@ def manage_state(self) -> None: # --- Dispatch ----------------------------------------------------------- if self.state == "merged": self._handle_merged_state(plan, discovered_data) + 
elif self.state == "replaced": + self._handle_replaced_state(plan, discovered_data) elif self.state == "overridden": self._handle_overridden_state(plan, discovered_data) else: @@ -2752,6 +2759,7 @@ def _handle_merged_state( ) if not has_work: self.log.info("merged: nothing to do — all switches idempotent") + self.msg = "No switches to merge — fabric already matches desired config" return # Check mode @@ -2889,8 +2897,9 @@ def _handle_overridden_state( or plan.to_swap or idempotent_save_req ) - if not has_work and not self.proposed: + if not has_work: self.log.info("overridden: nothing to do") + self.msg = "No switches to override — fabric already matches desired config" return # Check mode @@ -3033,6 +3042,179 @@ def _handle_overridden_state( self.log.debug("EXIT: _handle_overridden_state()") + def _handle_replaced_state( + self, + plan: "SwitchPlan", + discovered_data: Dict[str, Any], + ) -> None: + """Handle replaced-state reconciliation for the fabric. + + Reconciles only the switches listed in the desired config. Field + differences trigger delete and re-add, and POAP/preprovision mismatches + are also re-provisioned. + + Args: + plan: Unified action plan from :meth:`SwitchDiffEngine.compute_changes`. + discovered_data: Discovery data keyed by seed IP. + + Returns: + None. 
+ """ + self.log.debug("ENTER: _handle_replaced_state()") + self.log.info("Handling replaced state") + + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} + idempotent_save_req = self._check_idempotent_sync(plan, existing_by_ip) + + has_work = bool( + plan.to_add + or plan.to_update + or plan.to_delete_existing + or plan.migration_mode + or plan.to_bootstrap + or plan.normal_readd + or plan.to_preprovision + or plan.to_swap + or idempotent_save_req + ) + if not has_work: + self.log.info("replaced: nothing to do") + self.msg = "No switches to replace — fabric already matches desired config" + return + + # Check mode + if self.nd.module.check_mode: + self.log.info( + "Check mode: poap_mismatch_delete=%s, update=%s, add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s, save_deploy=%s", + len(plan.to_delete_existing), + len(plan.to_update), + len(plan.to_add), + len(plan.migration_mode), + len(plan.to_bootstrap), + len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), + idempotent_save_req, + ) + self.results.action = "replace" + self.results.state = self.state + self.results.operation_type = OperationType.CREATE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": False} + self.results.diff_current = { + "to_delete": len(plan.to_delete_existing), + "to_update": len(plan.to_update), + "to_add": len(plan.to_add), + "migration_mode": len(plan.migration_mode), + "bootstrap": len(plan.to_bootstrap), + "normal_readd": len(plan.normal_readd), + "preprovision": len(plan.to_preprovision), + "swap": len(plan.to_swap), + "save_deploy_required": idempotent_save_req, + } + self.results.register_api_call() + return + + # --- Phase 1: Combined delete ------------------------------------------- + # Two sources of deletions (orphans intentionally excluded): + # a) POAP/preprovision mismatches (to_delete_existing from compute_changes) + # 
b) Normal switches that need field updates (to_update) + switches_to_delete: List[SwitchDataModel] = [] + + for sw in plan.to_delete_existing: + self.log.info("Deleting POAP/preprovision mismatch %s before re-add", sw.fabric_management_ip) + switches_to_delete.append(sw) + self._log_operation("delete", sw.fabric_management_ip) + + update_ips: set = set() + for cfg in plan.to_update: + sw = existing_by_ip.get(cfg.seed_ip) + if sw: + self.log.info("Deleting normal switch %s for field update re-add", cfg.seed_ip) + switches_to_delete.append(sw) + update_ips.add(cfg.seed_ip) + self._log_operation("delete_for_update", cfg.seed_ip) + + if switches_to_delete: + try: + self.fabric_ops.bulk_delete(switches_to_delete) + except SwitchOperationError as e: + msg = f"Failed to delete switches during replaced state: {e}" + self.log.error(msg) + self.nd.module.fail_json(msg=msg) + for sw in switches_to_delete: + self.sent.add(sw) + + # --- Phase 2: Re-discover updated normal switches ----------------------- + re_discover_configs = [cfg for cfg in plan.to_update if cfg.seed_ip in update_ips] + if re_discover_configs: + self.log.info( + "Re-discovering %s updated switch(es) after deletion", + len(re_discover_configs), + ) + fresh = self.discovery.discover(re_discover_configs) + discovered_data = {**discovered_data, **fresh} + + # --- Phase 3: Combined add (normal to_add + to_update + normal_readd) --- + add_configs = plan.to_add + plan.to_update + plan.normal_readd + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + have_migration = bool(plan.migration_mode) + + if add_configs and discovered_data: + credential_groups = group_switches_by_credentials(add_configs, self.log) + for group_key, group_switches in credential_groups.items(): + username, _pw_hash, auth_proto, platform_type, preserve_config = group_key + password = group_switches[0].password + pairs = [(cfg, discovered_data[cfg.seed_ip]) for cfg in group_switches if cfg.seed_ip in discovered_data] + if not pairs: + 
self.log.warning( + "No discovery data for group %s — skipping", + [cfg.seed_ip for cfg in group_switches], + ) + continue + self.fabric_ops.bulk_add( + switches=pairs, + username=username, + password=password, + auth_proto=auth_proto, + platform_type=platform_type, + preserve_config=preserve_config, + ) + for cfg, disc in pairs: + sn = disc.get("serialNumber") + if sn: + switch_actions.append((sn, cfg)) + self._log_operation("add", cfg.seed_ip) + self.sent_adds.append(cfg) + + for cfg in plan.migration_mode: + sw = existing_by_ip.get(cfg.seed_ip) + if sw and sw.switch_id: + switch_actions.append((sw.switch_id, cfg)) + self._log_operation("migrate", cfg.seed_ip) + self.sent_adds.append(cfg) + + if switch_actions: + all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="replaced", + all_preserve_config=all_preserve_config, + update_roles=have_migration, + ) + elif idempotent_save_req: + self.log.info("No adds/migrations but config-sync required — running finalize") + self.fabric_ops.finalize() + + # --- Phase 4: POAP workflows (bootstrap / preprovision / swap) ---------- + poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap + if poap_workflow_configs: + self.sent_adds.extend(poap_workflow_configs) + self.poap_handler.handle(poap_workflow_configs, list(self.existing)) + + self.log.debug("EXIT: _handle_replaced_state()") + def _handle_gathered_state(self) -> None: """Handle gathered-state read of the fabric inventory. 
@@ -3121,6 +3303,7 @@ def _handle_deleted_state( self.log.info("Total switches marked for deletion: %s", len(switches_to_delete)) if not switches_to_delete: self.log.info("No switches to delete") + self.msg = "No switches to delete - fabric already matches desired config" return # Check mode diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index 905c52b8..ed0a77e9 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -434,16 +434,11 @@ def apply_state_defaults(self, info: ValidationInfo) -> "SwitchConfigModel": """ state = (info.context or {}).get("state") if info else None - # POAP/Pre-provision/Swap allowed with merged, overridden, or deleted - # (deleted ignores the sub-config and only uses seed_ip + role) - if (self.poap or self.preprovision) and state not in (None, "merged", "overridden", "deleted"): - raise ValueError(f"POAP/Pre-provision operations require 'merged' or 'overridden' state, " f"got '{state}' (switch: {self.seed_ip})") - # RMA only allowed with merged if self.rma and state not in (None, "merged"): raise ValueError(f"RMA operations require 'merged' state, " f"got '{state}' (switch: {self.seed_ip})") - if state in ("merged", "overridden"): + if state in ("merged", "overridden", "replaced"): if self.role is None: self.role = SwitchRole.LEAF if not self.username or not self.password: @@ -562,15 +557,9 @@ def to_gathered_dict(self) -> Dict[str, Any]: Dict with seed_ip, role, auth_proto, preserve_config, username set to ``""``, password set to ``""``. 
""" - result = self.to_config( - exclude={ - "platform_type": True, - "poap": True, - "preprovision": True, - "rma": True, - "operation_type": True, - } - ) + result = self.to_config() + for key in ("platform_type", "poap", "preprovision", "rma", "operation_type"): + result.pop(key, None) result["username"] = "" result["password"] = "" return result @@ -583,7 +572,7 @@ def get_argument_spec(cls) -> Dict[str, Any]: state=dict( type="str", default="merged", - choices=["merged", "overridden", "deleted", "gathered"], + choices=["merged", "replaced", "overridden", "deleted", "gathered"], ), save=dict(type="bool", default=True), deploy=dict(type="bool", default=True), diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 07638ebc..25ac4af7 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -30,14 +30,18 @@ state: description: - The state of ND and switch(es) after module completion. - - C(merged) and C(overridden) are supported for POAP and pre-provision operations. + - C(merged), C(replaced), and C(overridden) are supported for POAP and pre-provision operations. - C(merged) is the only state supported for RMA. + - C(replaced) reconciles only the switches listed in C(config). Field differences + trigger delete and re-add, but fabric switches not listed in C(config) are left + untouched. - C(gathered) reads the current fabric inventory and returns it in the C(gathered) key in config format. No changes are made. type: str default: merged choices: - merged + - replaced - overridden - deleted - gathered @@ -49,6 +53,7 @@ deploy: description: - Deploy the pending configuration of the fabric after inventory is updated. + - When set to C(true), C(save) must also be C(true). 
type: bool default: true config: @@ -397,11 +402,15 @@ def main(): supports_check_mode=True, required_if=[ ("state", "merged", ["config"]), + ("state", "replaced", ["config"]), ], ) require_pydantic(module) + if module.params.get("deploy") and not module.params.get("save"): + module.fail_json(msg="'deploy: true' requires 'save: true'") + # Initialize logging try: log_config = Log() diff --git a/tests/integration/targets/nd_manage_switches/files/.gitignore b/tests/integration/targets/nd_manage_switches/files/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/targets/nd_manage_switches/tasks/main.yaml b/tests/integration/targets/nd_manage_switches/tasks/main.yaml index 834955ba..6f6ed05c 100644 --- a/tests/integration/targets/nd_manage_switches/tasks/main.yaml +++ b/tests/integration/targets/nd_manage_switches/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Discover ND Test Cases ansible.builtin.find: - paths: "{{ role_path }}/tests/nd" + paths: "{{ role_path }}/tests" patterns: "{{ testcase }}.yaml" connection: local register: nd_testcases diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml b/tests/integration/targets/nd_manage_switches/tests/deleted.yaml similarity index 95% rename from tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml rename to tests/integration/targets/nd_manage_switches/tests/deleted.yaml index 584a4496..a981a23c 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/deleted.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/deleted.yaml @@ -92,7 +92,7 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 'result.response == "The switch provided is not part of the fabric and cannot be deleted"' + - 'result.msg == "No switches to delete - fabric already matches desired config"' tags: deleted # TC - 4 @@ -139,4 +139,5 @@ ansible.builtin.assert: that: - 'result.changed == false' + - 'result.msg == "No switches to delete - fabric already 
matches desired config"' tags: deleted \ No newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/gathered.yaml b/tests/integration/targets/nd_manage_switches/tests/gathered.yaml similarity index 100% rename from tests/integration/targets/nd_manage_switches/tests/nd/gathered.yaml rename to tests/integration/targets/nd_manage_switches/tests/gathered.yaml diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml b/tests/integration/targets/nd_manage_switches/tests/merged.yaml similarity index 96% rename from tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml rename to tests/integration/targets/nd_manage_switches/tests/merged.yaml index 4520833b..b94180d4 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/merged.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/merged.yaml @@ -42,7 +42,7 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + - 'result.msg == "No switches to merge — fabric already matches desired config"' tags: merged # TC - 3 @@ -125,7 +125,7 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + - 'result.msg == "No switches to merge — fabric already matches desired config"' tags: merged # TC - 6 @@ -191,7 +191,7 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + - 'result.msg == "No switches to merge — fabric already matches desired config"' tags: merged # TC - 8 diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml b/tests/integration/targets/nd_manage_switches/tests/overridden.yaml similarity index 95% rename from 
tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml rename to tests/integration/targets/nd_manage_switches/tests/overridden.yaml index f952e8bc..0f4c2942 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/overridden.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/overridden.yaml @@ -42,6 +42,7 @@ ansible.builtin.assert: that: - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' tags: overridden # TC - 3 @@ -151,7 +152,8 @@ - name: Assert ansible.builtin.assert: that: - - 'overridden_result.changed == false' + - 'overridden_result.changed == false' + - 'result.msg == "No switches to override — fabric already matches desired config"' tags: overridden # ---------------------------------------------- diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml b/tests/integration/targets/nd_manage_switches/tests/poap.yaml similarity index 100% rename from tests/integration/targets/nd_manage_switches/tests/nd/poap.yaml rename to tests/integration/targets/nd_manage_switches/tests/poap.yaml diff --git a/tests/integration/targets/nd_manage_switches/tests/replaced.yaml b/tests/integration/targets/nd_manage_switches/tests/replaced.yaml new file mode 100644 index 00000000..2f9b50ca --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/replaced.yaml @@ -0,0 +1,136 @@ +--- +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: replaced + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# TC - 1 +- name: Replaced TC1 - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + deploy: "{{ test_data.deploy }}" + register: merged_result + tags: replaced + +- name: Replaced TC1 - Query Switch 
State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: replaced + +- name: Replaced TC1 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: " {{ merged_result.changed }}" + register: result + tags: replaced + +# TC - 2 +- name: Replaced TC2 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf + register: result + tags: replaced + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + tags: replaced + +# TC - 3 +- name: Replaced TC3 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: leaf + preserve_config: false + delegate_to: localhost + tags: replaced + +- name: Import Configuration Prepare Tasks + vars: + file: replaced + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: replaced + +- name: Replaced TC3 - New Role for the Existing Switch + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: replaced + config: "{{ nd_switches_replaced_conf }}" + deploy: "{{ test_data.deploy }}" + register: replaced_result + tags: replaced + +- name: Replaced TC3 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_replaced_conf: >- + {{ (nd_switches_base_conf | rejectattr('seed_ip', 'equalto', test_data.sw2) | list) + + [nd_switches_base_conf | selectattr('seed_ip', 'equalto', test_data.sw2) | first | combine({'role': 'leaf'})] }} + delegate_to: localhost + tags: replaced + +- name: Replaced TC3 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: replaced + +- name: Replaced TC3 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + 
test_data: "{{ nd_switches_replaced_conf }}" + changed: "{{ replaced_result.changed }}" + register: result + tags: replaced + +# TC - 4 +- name: Replaced TC4 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + preserve_config: false + delegate_to: localhost + tags: replaced + +- name: Import Configuration Prepare Tasks + vars: + file: replaced + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: replaced + +- name: Replaced TC4 - Unspecified Role for the Existing Switch (Default, Leaf) + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: replaced + config: "{{ nd_switches_replaced_conf }}" + deploy: "{{ test_data.deploy }}" + register: replaced_result + tags: replaced + +- name: Assert + ansible.builtin.assert: + that: + - 'replaced_result.changed == false' + - 'result.msg == "No switches to replace — fabric already matches desired config"' + tags: replaced + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Replaced - Cleanup Fabric Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: replaced diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml b/tests/integration/targets/nd_manage_switches/tests/rma.yaml similarity index 100% rename from tests/integration/targets/nd_manage_switches/tests/nd/rma.yaml rename to tests/integration/targets/nd_manage_switches/tests/rma.yaml diff --git a/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml b/tests/integration/targets/nd_manage_switches/tests/sanity.yaml similarity index 64% rename from tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml rename to tests/integration/targets/nd_manage_switches/tests/sanity.yaml index 67b4548d..bad975f7 100644 --- a/tests/integration/targets/nd_manage_switches/tests/nd/sanity.yaml +++ 
b/tests/integration/targets/nd_manage_switches/tests/sanity.yaml @@ -47,29 +47,29 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + - 'result.msg == "No switches to merge — fabric already matches desired config"' tags: sanity # ---------------------------------------------- # # Gathered # # ---------------------------------------------- # -# TC - 3 -- name: Sanity TC3 - Gathered - Gather Switch State in Fabric +# TC - 2 +- name: Sanity TC2 - Gathered - Gather Switch State in Fabric cisco.nd.nd_manage_switches: state: gathered fabric: "{{ test_data.test_fabric }}" register: gathered_result tags: sanity -- name: Sanity TC3 - Gathered - Build Gathered Lookup +- name: Sanity TC2 - Gathered - Build Gathered Lookup ansible.builtin.set_fact: gathered_seeds: "{{ gathered_result.gathered | map(attribute='seed_ip') | list }}" gathered_role_map: "{{ gathered_result.gathered | items2dict(key_name='seed_ip', value_name='role') }}" delegate_to: localhost tags: sanity -- name: Sanity TC3 - Gathered - Validate Gathered Count +- name: Sanity TC2 - Gathered - Validate Gathered Count ansible.builtin.assert: that: - gathered_result.gathered | length == nd_switches_base_conf | length @@ -78,7 +78,7 @@ expected {{ nd_switches_base_conf | length }} tags: sanity -- name: Sanity TC3 - Gathered - Validate Each Switch Present and Role Matches +- name: Sanity TC2 - Gathered - Validate Each Switch Present and Role Matches ansible.builtin.assert: that: - item.seed_ip in gathered_seeds @@ -90,6 +90,69 @@ loop: "{{ nd_switches_base_conf }}" tags: sanity +# ---------------------------------------------- # +# Replaced # +# ---------------------------------------------- # +# TC - 3 +- name: Sanity TC3 - Replaced - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + role: spine + preserve_config: false + delegate_to: localhost + tags: 
sanity + +- name: Import Configuration Prepare Tasks + vars: + file: sanity + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: sanity + +- name: Sanity TC3 - Replaced - New Role for the Existing Switch + cisco.nd.nd_manage_switches: &conf_replace + fabric: "{{ test_data.test_fabric }}" + state: replaced + config: "{{ nd_switches_sanity_conf }}" + deploy: "{{ test_data.deploy }}" + register: replaced_result + tags: sanity + +- name: Sanity TC3 - Replaced - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_replaced_conf: >- + {{ (nd_switches_base_conf | rejectattr('seed_ip', 'equalto', test_data.sw1) | list) + + [nd_switches_base_conf | selectattr('seed_ip', 'equalto', test_data.sw1) | first | combine({'role': 'spine'})] }} + delegate_to: localhost + tags: sanity + +- name: Sanity TC3 - Replaced - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC3 - Replaced - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_replaced_conf }}" + changed: "{{ replaced_result.changed }}" + register: result + tags: sanity + +- name: Sanity TC3 - Replaced - Idempotence + cisco.nd.nd_manage_switches: *conf_replace + register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to replace — fabric already matches desired config"' + tags: sanity + # ---------------------------------------------- # # Overridden # # ---------------------------------------------- # @@ -116,7 +179,7 @@ state: overridden config: "{{ nd_switches_sanity_conf }}" deploy: "{{ test_data.deploy }}" - register: result + register: overriden_result tags: sanity - name: Sanity TC4 - Overridden - Query Inventory State in Fabric @@ -130,12 +193,11 @@ cisco.nd.nd_switches_validate: nd_data: "{{ query_result 
}}" test_data: "{{ nd_switches_sanity_conf }}" - changed: "{{ create_result.changed }}" + changed: "{{ overriden_result.changed }}" register: result tags: sanity -# TC - 5 -- name: Sanity TC5 - Overridden - Idempotence +- name: Sanity TC4 - Overridden - Idempotence cisco.nd.nd_manage_switches: *conf_over register: result tags: sanity @@ -144,15 +206,15 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 'result.response == "The switch provided is already part of the fabric and there is no more device to delete in the fabric"' + - 'result.msg == "No switches to override — fabric already matches desired config"' tags: sanity # ---------------------------------------------- # # Clean-up # # ---------------------------------------------- # -# TC - 6 -- name: Sanity TC6 - Deleted - Clean up Existing devices +# TC - 5 +- name: Sanity TC5 - Deleted - Clean up Existing devices cisco.nd.nd_manage_switches: &clean fabric: "{{ test_data.test_fabric }}" state: deleted @@ -160,20 +222,20 @@ register: deleted_result tags: sanity -- name: Sanity TC6 - Reset - Prepare Conf +- name: Sanity TC5 - Reset - Prepare Conf ansible.builtin.set_fact: nd_switches_sanity_conf: delegate_to: localhost tags: sanity -- name: Sanity TC6 - Deleted - Query Inventory State in Fabric +- name: Sanity TC5 - Deleted - Query Inventory State in Fabric cisco.nd.nd_rest: path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" method: get register: query_result tags: sanity -- name: Sanity TC6 - Deleted - Validate ND Data +- name: Sanity TC5 - Deleted - Validate ND Data cisco.nd.nd_switches_validate: nd_data: "{{ query_result }}" test_data: "{{ nd_switches_sanity_conf }}" @@ -181,8 +243,8 @@ register: result tags: sanity -# TC - 7 -- name: Sanity TC7 - Deleted - Idempotence +# TC - 6 +- name: Sanity TC6 - Deleted - Idempotence cisco.nd.nd_manage_switches: *clean register: result tags: sanity @@ -191,5 +253,5 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 
'result.response == "The switch provided is not part of the fabric and cannot be deleted"' + - 'result.msg == "No switches to delete - fabric already matches desired config"' tags: sanity \ No newline at end of file From 9fd18b26919456dc009943e6c75ac0e566be9b10 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Mon, 6 Apr 2026 12:25:32 +0530 Subject: [PATCH 096/109] Black Formatting Fix --- .../manage_switches/nd_switch_resources.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index ecaf64aa..ebcb3c4a 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2455,14 +2455,16 @@ def _inventory_to_config_list(self, collection: "NDConfigCollection") -> List[Di if not sw.fabric_management_ip: continue role = sw.switch_role - result.append({ - "seed_ip": sw.fabric_management_ip, - "role": getattr(role, "value", str(role)) if role else "leaf", - "auth_proto": "MD5", - "preserve_config": False, - "username": "", - "password": "", - }) + result.append( + { + "seed_ip": sw.fabric_management_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "auth_proto": "MD5", + "preserve_config": False, + "username": "", + "password": "", + } + ) return result def _proposed_to_config_list(self, configs: List["SwitchConfigModel"]) -> List[Dict[str, Any]]: From ed218e9f1296caf618cd80cf2e99765f4a815660 Mon Sep 17 00:00:00 2001 From: Mike Wiebe Date: Wed, 8 Apr 2026 09:56:08 -0400 Subject: [PATCH 097/109] Ansible ND 4.X Fabric Modules for iBGP, eBGP and External Fabric Types (#209) * Fabric modules for ibgp,ebgp,external fabrics * Update ibgp model enums * Update pydantic model and module docstrings for ibgp * Update pydantic model for ebgp * Update ebgp module doc headers * Update enums and pydantic model descriptions for external 
fabrics * Update ebgp module doc strings * Fix ansible sanity tests failures * Black formatting * Move common models into common location for import * Fix black formatting issue * Add unit tests for fabric endpoints * Fix ansible sanity test failures * Test cleanup * Add ibgp testing params * Fix for merged state and tests * Add more properties in ibgp merged test * Add more properties in ibgp replaced test * Refactor merged fix * Change name property to fabric_name * Add nd_info into integration tests * remove underscore between un & numbered * Address review comments * Fix list behavior bug and update module docs * Make ansible sanity happy * Make netflow_exporter udp_port optional * Organize ibgp module doc header --------- Co-authored-by: Matt Tarkington --- .../endpoints/v1/manage/manage_fabrics.py | 496 +++++ plugins/module_utils/models/base.py | 22 +- .../models/manage_fabric/enums.py | 416 ++++ .../manage_fabric/manage_fabric_common.py | 339 +++ .../manage_fabric/manage_fabric_ebgp.py | 929 ++++++++ .../manage_fabric/manage_fabric_external.py | 569 +++++ .../manage_fabric/manage_fabric_ibgp.py | 1195 +++++++++++ plugins/module_utils/nd.py | 13 +- plugins/module_utils/nd_config_collection.py | 10 +- plugins/module_utils/nd_state_machine.py | 6 +- .../orchestrators/manage_fabric_ebgp.py | 46 + .../orchestrators/manage_fabric_external.py | 46 + .../orchestrators/manage_fabric_ibgp.py | 46 + plugins/module_utils/utils.py | 13 +- plugins/modules/nd_manage_fabric_ebgp.py | 1690 +++++++++++++++ plugins/modules/nd_manage_fabric_external.py | 780 +++++++ plugins/modules/nd_manage_fabric_ibgp.py | 1888 +++++++++++++++++ .../nd_manage_fabric/tasks/fabric_ebgp.yaml | 1228 +++++++++++ .../tasks/fabric_external.yaml | 719 +++++++ .../nd_manage_fabric/tasks/fabric_ibgp.yaml | 1332 ++++++++++++ .../targets/nd_manage_fabric/tasks/main.yaml | 9 + .../targets/nd_manage_fabric/vars/main.yaml | 328 +++ .../test_endpoints_api_v1_manage_fabrics.py | 736 +++++++ 23 files changed, 
12847 insertions(+), 9 deletions(-) create mode 100644 plugins/module_utils/endpoints/v1/manage/manage_fabrics.py create mode 100644 plugins/module_utils/models/manage_fabric/enums.py create mode 100644 plugins/module_utils/models/manage_fabric/manage_fabric_common.py create mode 100644 plugins/module_utils/models/manage_fabric/manage_fabric_ebgp.py create mode 100644 plugins/module_utils/models/manage_fabric/manage_fabric_external.py create mode 100644 plugins/module_utils/models/manage_fabric/manage_fabric_ibgp.py create mode 100644 plugins/module_utils/orchestrators/manage_fabric_ebgp.py create mode 100644 plugins/module_utils/orchestrators/manage_fabric_external.py create mode 100644 plugins/module_utils/orchestrators/manage_fabric_ibgp.py create mode 100644 plugins/modules/nd_manage_fabric_ebgp.py create mode 100644 plugins/modules/nd_manage_fabric_external.py create mode 100644 plugins/modules/nd_manage_fabric_ibgp.py create mode 100644 tests/integration/targets/nd_manage_fabric/tasks/fabric_ebgp.yaml create mode 100644 tests/integration/targets/nd_manage_fabric/tasks/fabric_external.yaml create mode 100644 tests/integration/targets/nd_manage_fabric/tasks/fabric_ibgp.yaml create mode 100644 tests/integration/targets/nd_manage_fabric/tasks/main.yaml create mode 100644 tests/integration/targets/nd_manage_fabric/vars/main.yaml create mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py new file mode 100644 index 00000000..8a9b1c2b --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py @@ -0,0 +1,496 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabrics endpoint models. 
+ +This module contains endpoint definitions for fabric-related operations +in the ND Manage API. + +## Endpoints + +- `EpApiV1ManageFabricsGet` - Get a specific fabric by name + (GET /api/v1/manage/fabrics/{fabric_name}) +- `EpApiV1ManageFabricsListGet` - List all fabrics with optional filtering + (GET /api/v1/manage/fabrics) +- `EpApiV1ManageFabricsPost` - Create a new fabric + (POST /api/v1/manage/fabrics) +- `EpApiV1ManageFabricsPut` - Update a specific fabric + (PUT /api/v1/manage/fabrics/{fabric_name}) +- `EpApiV1ManageFabricsDelete` - Delete a specific fabric + (DELETE /api/v1/manage/fabrics/{fabric_name}) +- `EpApiV1ManageFabricsSummaryGet` - Get summary for a specific fabric + (GET /api/v1/manage/fabrics/{fabric_name}/summary) +""" + +from __future__ import annotations + +__metaclass__ = type + +from typing import ClassVar, Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import BasePath +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import FabricNameMixin +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import EndpointQueryParams +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import Field +from ansible_collections.cisco.nd.plugins.module_utils.types import IdentifierKey + + +class FabricsEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for the fabrics endpoint. 
+ + ## Parameters + + - cluster_name: Name of the target Nexus Dashboard cluster to execute this API, + in a multi-cluster deployment (optional) + + ## Usage + + ```python + params = FabricsEndpointParams(cluster_name="cluster1") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1" + ``` + """ + + cluster_name: Optional[str] = Field( + default=None, + min_length=1, + description="Name of the target Nexus Dashboard cluster to execute this API, in a multi-cluster deployment", + ) + + +class _EpManageFabricsBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for ND Manage Fabrics endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics endpoint. + + Subclasses may override: + - ``_require_fabric_name``: set to ``False`` for collection-level endpoints + (list, create) that do not include a fabric name in the path. + - ``_path_suffix``: set to a non-empty string to append an extra segment + after the fabric name (e.g. ``"summary"``). Only used when + ``_require_fabric_name`` is ``True``. + """ + + _require_fabric_name: ClassVar[bool] = True + _path_suffix: ClassVar[Optional[str]] = None + + endpoint_params: EndpointQueryParams = Field(default_factory=EndpointQueryParams, description="Endpoint-specific query parameters") + + def set_identifiers(self, identifier: IdentifierKey = None): + self.fabric_name = identifier + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional fabric name, path suffix, and + query string. 
+ + ## Returns + + - Complete endpoint path string + + ## Raises + + - `ValueError` if `fabric_name` is required but not set + """ + if self._require_fabric_name and self.fabric_name is None: + raise ValueError(f"{type(self).__name__}.path: fabric_name must be set before accessing path.") + segments = ["fabrics"] + if self.fabric_name is not None: + segments.append(self.fabric_name) + if self._path_suffix: + segments.append(self._path_suffix) + base_path = BasePath.path(*segments) + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + +class EpManageFabricsGet(_EpManageFabricsBase): + """ + # Summary + + ND Manage Fabrics GET Endpoint + + ## Description + + Endpoint to retrieve details for a specific named fabric from the ND Manage service. + The fabric name is a required path parameter. Optionally filter by cluster name + using the clusterName query parameter in multi-cluster deployments. + + ## Path + + - /api/v1/manage/fabrics/{fabric_name} + - /api/v1/manage/fabrics/{fabric_name}?clusterName=cluster1 + + ## Verb + + - GET + + ## Raises + + - `ValueError` if `fabric_name` is not set when accessing `path` + + ## Usage + + ```python + # Get details for a specific fabric + request = EpApiV1ManageFabricsGet() + request.fabric_name = "my-fabric" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/my-fabric + + # Get fabric details targeting a specific cluster in a multi-cluster deployment + request = EpApiV1ManageFabricsGet() + request.fabric_name = "my-fabric" + request.endpoint_params.cluster_name = "cluster1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/my-fabric?clusterName=cluster1 + ``` + """ + + class_name: Literal["EpApiV1ManageFabricsGet"] = Field(default="EpApiV1ManageFabricsGet", description="Class name for backward compatibility") + + endpoint_params: FabricsEndpointParams = 
Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET + + +class FabricsListEndpointParams(EndpointQueryParams): + """ + # Summary + + Query parameters for the ``GET /api/v1/manage/fabrics`` list endpoint. + + ## Parameters + + - cluster_name: Name of the target Nexus Dashboard cluster (multi-cluster deployments) + - category: Filter by fabric category (``"fabric"`` or ``"fabricGroup"``) + - filter: Lucene-format filter string + - max: Maximum number of records to return + - offset: Number of records to skip for pagination + - sort: Sort field with optional ``:desc`` suffix + + ## Usage + + ```python + params = FabricsListEndpointParams(category="fabric", max=10, offset=0) + query_string = params.to_query_string() + # Returns: "category=fabric&max=10&offset=0" + ``` + """ + + cluster_name: Optional[str] = Field( + default=None, + min_length=1, + description="Name of the target Nexus Dashboard cluster to execute this API, in a multi-cluster deployment", + ) + + category: Optional[str] = Field( + default=None, + description="Filter by category of fabric (fabric or fabricGroup)", + ) + + filter: Optional[str] = Field( + default=None, + description="Lucene format filter - Filter the response based on this filter field", + ) + + max: Optional[int] = Field( + default=None, + ge=1, + description="Number of records to return", + ) + + offset: Optional[int] = Field( + default=None, + ge=0, + description="Number of records to skip for pagination", + ) + + sort: Optional[str] = Field( + default=None, + description="Sort the records by the declared fields in either ascending (default) or descending (:desc) order", + ) + + +class EpManageFabricsListGet(_EpManageFabricsBase): + """ + # Summary + + ND Manage Fabrics List GET Endpoint + + ## Description + + Endpoint to list all fabrics from the ND Manage service. 
+
+    Supports optional query parameters for filtering, pagination, and sorting.
+
+    ## Path
+
+    - ``/api/v1/manage/fabrics``
+    - ``/api/v1/manage/fabrics?category=fabric&max=10``
+
+    ## Verb
+
+    - GET
+
+    ## Raises
+
+    - None
+
+    ## Usage
+
+    ```python
+    # List all fabrics
+    ep = EpManageFabricsListGet()
+    path = ep.path
+    verb = ep.verb
+    # Path: /api/v1/manage/fabrics
+
+    # List fabrics with filtering and pagination
+    ep = EpManageFabricsListGet()
+    ep.endpoint_params.category = "fabric"
+    ep.endpoint_params.max = 10
+    path = ep.path
+    # Path: /api/v1/manage/fabrics?category=fabric&max=10
+    ```
+    """
+
+    _require_fabric_name: ClassVar[bool] = False
+
+    class_name: Literal["EpApiV1ManageFabricsListGet"] = Field(default="EpApiV1ManageFabricsListGet", description="Class name for backward compatibility")
+
+    endpoint_params: FabricsListEndpointParams = Field(default_factory=FabricsListEndpointParams, description="Endpoint-specific query parameters")
+
+    @property
+    def verb(self) -> HttpVerbEnum:
+        """Return the HTTP verb for this endpoint."""
+        return HttpVerbEnum.GET
+
+
+class EpManageFabricsPost(_EpManageFabricsBase):
+    """
+    # Summary
+
+    ND Manage Fabrics POST Endpoint
+
+    ## Description
+
+    Endpoint to create a new fabric via the ND Manage service.
+    The request body must conform to the ``baseFabric`` schema (discriminated
+    by ``category``). For standard fabrics the category is ``"fabric"`` and
+    the body includes ``name`` plus fabric-specific properties such as
+    ``location``, ``licenseTier``, ``telemetryCollection``, etc.
+
+    ## Path
+
+    - ``/api/v1/manage/fabrics``
+    - ``/api/v1/manage/fabrics?clusterName=cluster1``
+
+    ## Verb
+
+    - POST
+
+    ## Request Body (application/json)
+
+    ``baseFabric`` schema — for a standard fabric use ``category: "fabric"``
+    with at minimum:
+
+    - ``name`` (str, required): Name of the fabric
+    - ``category`` (str, required): ``"fabric"``
+
+    ## Raises
+
+    - None
+
+    ## Usage
+
+    ```python
+    ep = EpManageFabricsPost()
+    rest_send.path = ep.path
+    rest_send.verb = ep.verb
+    rest_send.payload = {
+        "name": "my-fabric",
+        "category": "fabric",
+        "telemetryCollection": True,
+        "telemetryCollectionType": "inBand",
+    }
+    ```
+    """
+
+    _require_fabric_name: ClassVar[bool] = False
+
+    class_name: Literal["EpApiV1ManageFabricsPost"] = Field(default="EpApiV1ManageFabricsPost", description="Class name for backward compatibility")
+
+    endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters")
+
+    @property
+    def verb(self) -> HttpVerbEnum:
+        """Return the HTTP verb for this endpoint."""
+        return HttpVerbEnum.POST
+
+
+class EpManageFabricsPut(_EpManageFabricsBase):
+    """
+    # Summary
+
+    ND Manage Fabrics PUT Endpoint
+
+    ## Description
+
+    Endpoint to update an existing fabric via the ND Manage service.
+    The fabric name is a required path parameter. The request body must
+    conform to the ``baseFabric`` schema (same shape as POST/create).
+
+    ## Path
+
+    - ``/api/v1/manage/fabrics/{fabric_name}``
+    - ``/api/v1/manage/fabrics/{fabric_name}?clusterName=cluster1``
+
+    ## Verb
+
+    - PUT
+
+    ## Request Body (application/json)
+
+    ``baseFabric`` schema — same as create (POST).
+
+    ## Raises
+
+    - `ValueError` if `fabric_name` is not set when accessing `path`
+
+    ## Usage
+
+    ```python
+    ep = EpManageFabricsPut()
+    ep.fabric_name = "my-fabric"
+    rest_send.path = ep.path
+    rest_send.verb = ep.verb
+    rest_send.payload = {
+        "name": "my-fabric",
+        "category": "fabric",
+        "telemetryCollection": False,
+    }
+    ```
+    """
+
+    class_name: Literal["EpApiV1ManageFabricsPut"] = Field(default="EpApiV1ManageFabricsPut", description="Class name for backward compatibility")
+
+    endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters")
+
+    @property
+    def verb(self) -> HttpVerbEnum:
+        """Return the HTTP verb for this endpoint."""
+        return HttpVerbEnum.PUT
+
+
+class EpManageFabricsDelete(_EpManageFabricsBase):
+    """
+    # Summary
+
+    ND Manage Fabrics DELETE Endpoint
+
+    ## Description
+
+    Endpoint to delete a specific fabric from the ND Manage service.
+    The fabric name is a required path parameter.
+
+    ## Path
+
+    - ``/api/v1/manage/fabrics/{fabric_name}``
+    - ``/api/v1/manage/fabrics/{fabric_name}?clusterName=cluster1``
+
+    ## Verb
+
+    - DELETE
+
+    ## Raises
+
+    - `ValueError` if `fabric_name` is not set when accessing `path`
+
+    ## Usage
+
+    ```python
+    ep = EpManageFabricsDelete()
+    ep.fabric_name = "my-fabric"
+    rest_send.path = ep.path
+    rest_send.verb = ep.verb
+    ```
+    """
+
+    class_name: Literal["EpApiV1ManageFabricsDelete"] = Field(default="EpApiV1ManageFabricsDelete", description="Class name for backward compatibility")
+
+    endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters")
+
+    @property
+    def verb(self) -> HttpVerbEnum:
+        """Return the HTTP verb for this endpoint."""
+        return HttpVerbEnum.DELETE
+
+
+class EpManageFabricsSummaryGet(_EpManageFabricsBase):
+    """
+    # Summary
+
+    ND Manage Fabrics Summary GET Endpoint
+
+    ## Description
+
+    Endpoint to retrieve summary information for a specific fabric from
+    the ND Manage service. The fabric name is a required path parameter.
+ + ## Path + + - ``/api/v1/manage/fabrics/{fabric_name}/summary`` + - ``/api/v1/manage/fabrics/{fabric_name}/summary?clusterName=cluster1`` + + ## Verb + + - GET + + ## Raises + + - `ValueError` if `fabric_name` is not set when accessing `path` + + ## Usage + + ```python + ep = EpApiV1ManageFabricsSummaryGet() + ep.fabric_name = "my-fabric" + path = ep.path + verb = ep.verb + # Path: /api/v1/manage/fabrics/my-fabric/summary + ``` + """ + + class_name: Literal["EpApiV1ManageFabricsSummaryGet"] = Field( + default="EpApiV1ManageFabricsSummaryGet", description="Class name for backward compatibility" + ) + + _path_suffix: ClassVar[Optional[str]] = "summary" + + endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET diff --git a/plugins/module_utils/models/base.py b/plugins/module_utils/models/base.py index a62a12b1..57689c3a 100644 --- a/plugins/module_utils/models/base.py +++ b/plugins/module_utils/models/base.py @@ -196,16 +196,26 @@ def to_diff_dict(self, **kwargs) -> Dict[str, Any]: **kwargs, ) - def get_diff(self, other: "NDBaseModel") -> bool: - """Diff comparison.""" + def get_diff(self, other: "NDBaseModel", exclude_unset: bool = False) -> bool: + """Diff comparison. + + Args: + other: The model to compare against. + exclude_unset: When True, only compare fields explicitly set in + ``other`` (via Pydantic's ``exclude_unset``). This prevents + default values from triggering false diffs during merge + operations. + """ self_data = self.to_diff_dict() - other_data = other.to_diff_dict() + other_data = other.to_diff_dict(exclude_unset=exclude_unset) return issubset(other_data, self_data) def merge(self, other: "NDBaseModel") -> "NDBaseModel": """ - Merge another model's non-None values into this instance. 
+        Merge another model's explicitly set, non-None values into this instance.
 
         Recursively merges nested NDBaseModel fields.
+        Only fields present in ``other.model_fields_set`` are applied so that
+        Pydantic default values do not overwrite existing configuration.
 
         Returns self for chaining.
         """
@@ -216,6 +226,10 @@ def merge(self, other: "NDBaseModel") -> "NDBaseModel":
             if value is None:
                 continue
 
+            # Only merge fields that were explicitly provided, not defaults
+            if field_name not in other.model_fields_set:
+                continue
+
             current = getattr(self, field_name)
             if isinstance(current, NDBaseModel) and isinstance(value, NDBaseModel):
                 current.merge(value)
diff --git a/plugins/module_utils/models/manage_fabric/enums.py b/plugins/module_utils/models/manage_fabric/enums.py
new file mode 100644
index 00000000..8bb17076
--- /dev/null
+++ b/plugins/module_utils/models/manage_fabric/enums.py
@@ -0,0 +1,416 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=wrong-import-position
+# pylint: disable=missing-module-docstring
+# Copyright: (c) 2026, Mike Wiebe (@mwiebe)
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+# Summary
+
+Enum definitions for Nexus Dashboard fabric management models.
+
+## Enums
+
+- FabricTypeEnum: Enum of supported fabric types (discriminated-union key).
+- Plus per-feature fabric configuration enums (license tier, replication mode, telemetry, etc.).
+"""
+
+from __future__ import annotations
+
+__metaclass__ = type
+
+from enum import Enum
+
+
+class FabricTypeEnum(str, Enum):
+    """
+    # Summary
+
+    Enumeration of supported fabric types for discriminated union.
+
+    ## Values
+
+    - `VXLAN_IBGP` / `VXLAN_EBGP` - VXLAN fabric with iBGP / eBGP overlay
+    - `EXTERNAL_CONNECTIVITY` - External connectivity fabric
+    """
+
+    VXLAN_IBGP = "vxlanIbgp"
+    VXLAN_EBGP = "vxlanEbgp"
+    EXTERNAL_CONNECTIVITY = "externalConnectivity"
+
+
+class AlertSuspendEnum(str, Enum):
+    """
+    # Summary
+
+    Enumeration for alert suspension states.
+ + ## Values + + - `ENABLED` - Alerts are enabled + - `DISABLED` - Alerts are disabled + """ + + ENABLED = "enabled" + DISABLED = "disabled" + + +class LicenseTierEnum(str, Enum): + """ + # Summary + + Enumeration for license tier options. + + ## Values + + - `ESSENTIALS` - Essentials license tier + - `ADVANTAGE` - Advantage license tier + - `PREMIER` - Premier license tier + """ + + ESSENTIALS = "essentials" + ADVANTAGE = "advantage" + PREMIER = "premier" + + +class ReplicationModeEnum(str, Enum): + """ + # Summary + + Enumeration for replication modes. + + ## Values + + - `MULTICAST` - Multicast replication + - `INGRESS` - Ingress replication + """ + + MULTICAST = "multicast" + INGRESS = "ingress" + + +class OverlayModeEnum(str, Enum): + """ + # Summary + + Enumeration for overlay modes. + + ## Values + + - `CLI` - CLI based configuration + - `CONFIG_PROFILE` - Configuration profile based + """ + + CLI = "cli" + CONFIG_PROFILE = "config-profile" + + +class LinkStateRoutingProtocolEnum(str, Enum): + """ + # Summary + + Enumeration for underlay routing protocols. + + ## Values + + - `OSPF` - Open Shortest Path First + - `ISIS` - Intermediate System to Intermediate System + """ + + OSPF = "ospf" + ISIS = "isis" + + +class CoppPolicyEnum(str, Enum): + """ + # Summary + + Enumeration for CoPP policy options. + """ + + DENSE = "dense" + LENIENT = "lenient" + MODERATE = "moderate" + STRICT = "strict" + MANUAL = "manual" + + +class FabricInterfaceTypeEnum(str, Enum): + """ + # Summary + + Enumeration for fabric interface types. + """ + + P2P = "p2p" + UNNUMBERED = "unNumbered" + + +class GreenfieldDebugFlagEnum(str, Enum): + """ + # Summary + + Enumeration for greenfield debug flag. + """ + + ENABLE = "enable" + DISABLE = "disable" + + +class IsisLevelEnum(str, Enum): + """ + # Summary + + Enumeration for IS-IS levels. 
+ """ + + LEVEL_1 = "level-1" + LEVEL_2 = "level-2" + + +class SecurityGroupStatusEnum(str, Enum): + """ + # Summary + + Enumeration for security group status. + """ + + ENABLED = "enabled" + ENABLED_STRICT = "enabledStrict" + ENABLED_LOOSE = "enabledLoose" + ENABLE_PENDING = "enablePending" + ENABLE_PENDING_STRICT = "enablePendingStrict" + ENABLE_PENDING_LOOSE = "enablePendingLoose" + DISABLE_PENDING = "disablePending" + DISABLED = "disabled" + + +class StpRootOptionEnum(str, Enum): + """ + # Summary + + Enumeration for STP root options. + """ + + RPVST_PLUS = "rpvst+" + MST = "mst" + UNMANAGED = "unmanaged" + + +class VpcPeerKeepAliveOptionEnum(str, Enum): + """ + # Summary + + Enumeration for vPC peer keep-alive options. + """ + + LOOPBACK = "loopback" + MANAGEMENT = "management" + + +class DhcpProtocolVersionEnum(str, Enum): + """ + # Summary + + Enumeration for DHCP protocol version options. + """ + + DHCPV4 = "dhcpv4" + DHCPV6 = "dhcpv6" + + +class PowerRedundancyModeEnum(str, Enum): + """ + # Summary + + Enumeration for power redundancy mode options. + """ + + REDUNDANT = "redundant" + COMBINED = "combined" + INPUT_SRC_REDUNDANT = "inputSrcRedundant" + + +class BgpAsModeEnum(str, Enum): + """ + # Summary + + Enumeration for eBGP BGP AS mode options. + """ + + MULTI_AS = "multiAS" + SAME_TIER_AS = "sameTierAS" + + +class FirstHopRedundancyProtocolEnum(str, Enum): + """ + # Summary + + Enumeration for first-hop redundancy protocol options. + """ + + HSRP = "hsrp" + VRRP = "vrrp" + + +class AimlQosPolicyEnum(str, Enum): + """ + # Summary + + Enumeration for AI/ML QoS policy options based on fabric link speed. + """ + + V_800G = "800G" + V_400G = "400G" + V_100G = "100G" + V_25G = "25G" + USER_DEFINED = "User-defined" + + +class AllowVlanOnLeafTorPairingEnum(str, Enum): + """ + # Summary + + Enumeration for allowed VLAN on leaf-TOR pairing port-channels. 
+ """ + + NONE = "none" + ALL = "all" + + +class BgpAuthenticationKeyTypeEnum(str, Enum): + """ + # Summary + + Enumeration for BGP authentication key encryption types. + """ + + THREE_DES = "3des" + TYPE6 = "type6" + TYPE7 = "type7" + + +class DlbMixedModeDefaultEnum(str, Enum): + """ + # Summary + + Enumeration for DLB mixed mode default options. + """ + + ECMP = "ecmp" + FLOWLET = "flowlet" + PER_PACKET = "per-packet" + + +class DlbModeEnum(str, Enum): + """ + # Summary + + Enumeration for DLB mode options. + """ + + FLOWLET = "flowlet" + PER_PACKET = "per-packet" + POLICY_DRIVEN_FLOWLET = "policy-driven-flowlet" + POLICY_DRIVEN_PER_PACKET = "policy-driven-per-packet" + POLICY_DRIVEN_MIXED_MODE = "policy-driven-mixed-mode" + + +class MacsecAlgorithmEnum(str, Enum): + """ + # Summary + + Enumeration for MACsec cryptographic algorithm options. + """ + + AES_128_CMAC = "AES_128_CMAC" + AES_256_CMAC = "AES_256_CMAC" + + +class MacsecCipherSuiteEnum(str, Enum): + """ + # Summary + + Enumeration for MACsec cipher suite options. + """ + + GCM_AES_128 = "GCM-AES-128" + GCM_AES_256 = "GCM-AES-256" + GCM_AES_XPN_128 = "GCM-AES-XPN-128" + GCM_AES_XPN_256 = "GCM-AES-XPN-256" + + +class RendezvousPointCountEnum(int, Enum): + """ + # Summary + + Enumeration for number of spines acting as Rendezvous-Points. + """ + + TWO = 2 + FOUR = 4 + + +class RendezvousPointModeEnum(str, Enum): + """ + # Summary + + Enumeration for multicast rendezvous point mode. + """ + + ASM = "asm" + BIDIR = "bidir" + + +class RouteReflectorCountEnum(int, Enum): + """ + # Summary + + Enumeration for number of spines acting as Route-Reflectors. + """ + + TWO = 2 + FOUR = 4 + + +class UnderlayMulticastGroupAddressLimitEnum(int, Enum): + """ + # Summary + + Enumeration for underlay multicast group address limit. + """ + + V_128 = 128 + V_512 = 512 + + +class TelemetryCollectionTypeEnum(str, Enum): + """ + # Summary + + Enumeration for telemetry collection method options. 
+ """ + + IN_BAND = "inBand" + OUT_OF_BAND = "outOfBand" + + +class TelemetryStreamingProtocolEnum(str, Enum): + """ + # Summary + + Enumeration for telemetry streaming protocol options. + """ + + IPV4 = "ipv4" + IPV6 = "ipv6" + + +class VrfLiteAutoConfigEnum(str, Enum): + """ + # Summary + + Enumeration for VRF Lite auto-config deployment options. + """ + + MANUAL = "manual" + BACK2BACK_AND_TO_EXTERNAL = "back2BackAndToExternal" diff --git a/plugins/module_utils/models/manage_fabric/manage_fabric_common.py b/plugins/module_utils/models/manage_fabric/manage_fabric_common.py new file mode 100644 index 00000000..9cb3af47 --- /dev/null +++ b/plugins/module_utils/models/manage_fabric/manage_fabric_common.py @@ -0,0 +1,339 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +# Summary + +Common Pydantic models shared across fabric types (iBGP, eBGP, External Connectivity). 
+ +## Models + +- `LocationModel` - Geographic location coordinates +- `NetflowExporterModel` - Netflow exporter configuration +- `NetflowRecordModel` - Netflow record configuration +- `NetflowMonitorModel` - Netflow monitor configuration +- `NetflowSettingsModel` - Complete netflow settings +- `BootstrapSubnetModel` - Bootstrap subnet configuration +- `TelemetryFlowCollectionModel` - Telemetry flow collection settings +- `TelemetryMicroburstModel` - Microburst detection configuration +- `TelemetryAnalysisSettingsModel` - Telemetry analysis configuration +- `TelemetryEnergyManagementModel` - Energy management telemetry +- `TelemetryNasExportSettingsModel` - NAS export settings +- `TelemetryNasModel` - NAS telemetry configuration +- `TelemetrySettingsModel` - Complete telemetry configuration +- `ExternalStreamingSettingsModel` - External streaming configuration +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re +from typing import List, Dict, Any, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + ConfigDict, + Field, +) + +# Regex from OpenAPI schema: bgpAsn accepts plain integers (1-4294967295) and +# dotted four-byte ASN notation (1-65535).(0-65535) +BGP_ASN_RE = re.compile( + r"^(([1-9]{1}[0-9]{0,8}|[1-3]{1}[0-9]{1,9}|[4]{1}([0-1]{1}[0-9]{8}" + r"|[2]{1}([0-8]{1}[0-9]{7}|[9]{1}([0-3]{1}[0-9]{6}|[4]{1}([0-8]{1}[0-9]{5}" + r"|[9]{1}([0-5]{1}[0-9]{4}|[6]{1}([0-6]{1}[0-9]{3}|[7]{1}([0-1]{1}[0-9]{2}" + r"|[2]{1}([0-8]{1}[0-9]{1}|[9]{1}[0-5]{1})))))))))" + r"|([1-5]\d{4}|[1-9]\d{0,3}|6[0-4]\d{3}|65[0-4]\d{2}|655[0-2]\d|6553[0-5])" + r"(\.([1-5]\d{4}|[1-9]\d{0,3}|6[0-4]\d{3}|65[0-4]\d{2}|655[0-2]\d|6553[0-5]|0))?)$" +) + + +class LocationModel(NDNestedModel): + """ + # Summary + + Geographic location coordinates for the fabric. 
+ + ## Raises + + - `ValueError` - If latitude or longitude are outside valid ranges + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + latitude: float = Field(description="Latitude coordinate (-90 to 90)", ge=-90.0, le=90.0) + longitude: float = Field(description="Longitude coordinate (-180 to 180)", ge=-180.0, le=180.0) + + +class NetflowExporterModel(NDNestedModel): + """ + # Summary + + Netflow exporter configuration for telemetry. + + ## Raises + + - `ValueError` - If UDP port is outside valid range or IP address is invalid + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + exporter_name: str = Field(alias="exporterName", description="Name of the netflow exporter") + exporter_ip: str = Field(alias="exporterIp", description="IP address of the netflow collector") + vrf: str = Field(description="VRF name for the exporter", default="management") + source_interface_name: str = Field(alias="sourceInterfaceName", description="Source interface name") + udp_port: Optional[int] = Field(alias="udpPort", description="UDP port for netflow export", ge=1, le=65535, default=None) + + +class NetflowRecordModel(NDNestedModel): + """ + # Summary + + Netflow record configuration defining flow record templates. + + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + record_name: str = Field(alias="recordName", description="Name of the netflow record") + record_template: str = Field(alias="recordTemplate", description="Template type for the record") + layer2_record: bool = Field(alias="layer2Record", description="Enable layer 2 record fields", default=False) + + +class NetflowMonitorModel(NDNestedModel): + """ + # Summary + + Netflow monitor configuration linking records to exporters. 
+ + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + monitor_name: str = Field(alias="monitorName", description="Name of the netflow monitor") + record_name: str = Field(alias="recordName", description="Associated record name") + exporter1_name: str = Field(alias="exporter1Name", description="Primary exporter name") + exporter2_name: str = Field(alias="exporter2Name", description="Secondary exporter name", default="") + + +class NetflowSettingsModel(NDNestedModel): + """ + # Summary + + Complete netflow configuration including exporters, records, and monitors. + + ## Raises + + - `ValueError` - If netflow lists are inconsistent with netflow enabled state + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + netflow: bool = Field(description="Enable netflow collection", default=False) + netflow_exporter_collection: List[NetflowExporterModel] = Field( + alias="netflowExporterCollection", description="List of netflow exporters", default_factory=list + ) + netflow_record_collection: List[NetflowRecordModel] = Field(alias="netflowRecordCollection", description="List of netflow records", default_factory=list) + netflow_monitor_collection: List[NetflowMonitorModel] = Field( + alias="netflowMonitorCollection", description="List of netflow monitors", default_factory=list + ) + + +class BootstrapSubnetModel(NDNestedModel): + """ + # Summary + + Bootstrap subnet configuration for fabric initialization. 
+ + ## Raises + + - `ValueError` - If IP addresses or subnet prefix are invalid + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + start_ip: str = Field(alias="startIp", description="Starting IP address of the bootstrap range") + end_ip: str = Field(alias="endIp", description="Ending IP address of the bootstrap range") + default_gateway: str = Field(alias="defaultGateway", description="Default gateway for bootstrap subnet") + subnet_prefix: int = Field(alias="subnetPrefix", description="Subnet prefix length", ge=8, le=30) + + +class TelemetryFlowCollectionModel(NDNestedModel): + """ + # Summary + + Telemetry flow collection configuration. + + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + traffic_analytics: str = Field(alias="trafficAnalytics", description="Traffic analytics state", default="enabled") + traffic_analytics_scope: str = Field(alias="trafficAnalyticsScope", description="Traffic analytics scope", default="intraFabric") + operating_mode: str = Field(alias="operatingMode", description="Operating mode", default="flowTelemetry") + udp_categorization: str = Field(alias="udpCategorization", description="UDP categorization", default="enabled") + + +class TelemetryMicroburstModel(NDNestedModel): + """ + # Summary + + Microburst detection configuration. + + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + microburst: bool = Field(description="Enable microburst detection", default=False) + sensitivity: str = Field(description="Microburst sensitivity level", default="low") + + +class TelemetryAnalysisSettingsModel(NDNestedModel): + """ + # Summary + + Telemetry analysis configuration. 
+ + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + is_enabled: bool = Field(alias="isEnabled", description="Enable telemetry analysis", default=False) + + +class TelemetryEnergyManagementModel(NDNestedModel): + """ + # Summary + + Energy management telemetry configuration. + + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + cost: float = Field(description="Energy cost per unit", default=1.2) + + +class TelemetryNasExportSettingsModel(NDNestedModel): + """ + # Summary + + NAS export settings for telemetry. + + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + export_type: str = Field(alias="exportType", description="Export type", default="full") + export_format: str = Field(alias="exportFormat", description="Export format", default="json") + + +class TelemetryNasModel(NDNestedModel): + """ + # Summary + + NAS (Network Attached Storage) telemetry configuration. + + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + server: str = Field(description="NAS server address", default="") + export_settings: TelemetryNasExportSettingsModel = Field( + alias="exportSettings", description="NAS export settings", default_factory=TelemetryNasExportSettingsModel + ) + + +class TelemetrySettingsModel(NDNestedModel): + """ + # Summary + + Complete telemetry configuration for the fabric. 
+ + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + flow_collection: TelemetryFlowCollectionModel = Field( + alias="flowCollection", description="Flow collection settings", default_factory=TelemetryFlowCollectionModel + ) + microburst: TelemetryMicroburstModel = Field(description="Microburst detection settings", default_factory=TelemetryMicroburstModel) + analysis_settings: TelemetryAnalysisSettingsModel = Field( + alias="analysisSettings", description="Analysis settings", default_factory=TelemetryAnalysisSettingsModel + ) + nas: TelemetryNasModel = Field(description="NAS telemetry configuration", default_factory=TelemetryNasModel) + energy_management: TelemetryEnergyManagementModel = Field( + alias="energyManagement", description="Energy management settings", default_factory=TelemetryEnergyManagementModel + ) + + +class ExternalStreamingSettingsModel(NDNestedModel): + """ + # Summary + + External streaming configuration for events and data export. 
+ + ## Raises + + None + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + email: List[Dict[str, Any]] = Field(description="Email streaming configuration", default_factory=list) + message_bus: List[Dict[str, Any]] = Field(alias="messageBus", description="Message bus configuration", default_factory=list) + syslog: Dict[str, Any] = Field( + description="Syslog streaming configuration", default_factory=lambda: {"collectionSettings": {"anomalies": []}, "facility": "", "servers": []} + ) + webhooks: List[Dict[str, Any]] = Field(description="Webhook configuration", default_factory=list) + + +# Export all models for external use +__all__ = [ + "LocationModel", + "NetflowExporterModel", + "NetflowRecordModel", + "NetflowMonitorModel", + "NetflowSettingsModel", + "BootstrapSubnetModel", + "TelemetryFlowCollectionModel", + "TelemetryMicroburstModel", + "TelemetryAnalysisSettingsModel", + "TelemetryEnergyManagementModel", + "TelemetryNasExportSettingsModel", + "TelemetryNasModel", + "TelemetrySettingsModel", + "ExternalStreamingSettingsModel", + "BGP_ASN_RE", +] diff --git a/plugins/module_utils/models/manage_fabric/manage_fabric_ebgp.py b/plugins/module_utils/models/manage_fabric/manage_fabric_ebgp.py new file mode 100644 index 00000000..77aef7f4 --- /dev/null +++ b/plugins/module_utils/models/manage_fabric/manage_fabric_ebgp.py @@ -0,0 +1,929 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re +from typing import List, Dict, Any, Optional, ClassVar, Literal + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from 
ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + ConfigDict, + Field, + field_validator, + model_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.enums import ( + FabricTypeEnum, + AlertSuspendEnum, + LicenseTierEnum, + OverlayModeEnum, + ReplicationModeEnum, + CoppPolicyEnum, + GreenfieldDebugFlagEnum, + VpcPeerKeepAliveOptionEnum, + BgpAsModeEnum, + FirstHopRedundancyProtocolEnum, + AimlQosPolicyEnum, + AllowVlanOnLeafTorPairingEnum, + BgpAuthenticationKeyTypeEnum, + DhcpProtocolVersionEnum, + DlbMixedModeDefaultEnum, + DlbModeEnum, + MacsecAlgorithmEnum, + MacsecCipherSuiteEnum, + PowerRedundancyModeEnum, + RendezvousPointCountEnum, + RendezvousPointModeEnum, + UnderlayMulticastGroupAddressLimitEnum, + VrfLiteAutoConfigEnum, +) + +# Re-use shared nested models from the common module +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_common import ( + BGP_ASN_RE, + LocationModel, + NetflowSettingsModel, + BootstrapSubnetModel, + TelemetrySettingsModel, + ExternalStreamingSettingsModel, +) + +""" +# Comprehensive Pydantic models for eBGP VXLAN fabric management via Nexus Dashboard + +This module provides Pydantic models for creating, updating, and deleting +eBGP VXLAN fabrics through the Nexus Dashboard Fabric Controller (NDFC) API. + +## Models Overview + +- `VxlanEbgpManagementModel` - eBGP VXLAN specific management settings +- `FabricEbgpModel` - Complete fabric creation model for eBGP fabrics +- `FabricEbgpDeleteModel` - Fabric deletion model + +## Usage + +```python +# Create a new eBGP VXLAN fabric +fabric_data = { + "name": "MyEbgpFabric", + "management": { + "type": "vxlanEbgp", + "bgpAsnAutoAllocation": True, + "bgpAsnRange": "65000-65535" + } +} +fabric = FabricEbgpModel(**fabric_data) +``` +""" + + +class VxlanEbgpManagementModel(NDNestedModel): + """ + # Summary + + Comprehensive eBGP VXLAN fabric management configuration. 
+ + This model contains all settings specific to eBGP VXLAN fabric types including + overlay configuration, BGP AS allocation, multicast settings, and advanced features. + + ## Raises + + - `ValueError` - If BGP ASN, VLAN ranges, or IP ranges are invalid + - `TypeError` - If required string fields are not provided + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + # Fabric Type (required for discriminated union) + type: Literal[FabricTypeEnum.VXLAN_EBGP] = Field(description="Type of the fabric", default=FabricTypeEnum.VXLAN_EBGP) + + # Core eBGP Configuration + bgp_asn: Optional[str] = Field( + alias="bgpAsn", description="BGP Autonomous System Number 1-4294967295 | 1-65535[.0-65535]. Optional when bgpAsnAutoAllocation is True.", default=None + ) + site_id: Optional[str] = Field(alias="siteId", description="For EVPN Multi-Site Support. Defaults to Fabric ASN", default="") + bgp_as_mode: BgpAsModeEnum = Field( + alias="bgpAsMode", + description=( + "Multi-AS Unique ASN per Leaf/Border/Border Gateway (Borders and border gateways are " + "allowed to share ASN). 
Same-Tier-AS Leafs share one ASN, Borders/border gateways share one ASN" + ), + default=BgpAsModeEnum.MULTI_AS, + ) + bgp_asn_auto_allocation: bool = Field( + alias="bgpAsnAutoAllocation", + description=("Automatically allocate and track BGP ASN for leafs, borders and border gateways " "in Multi-AS mode"), + default=True, + ) + bgp_asn_range: Optional[str] = Field( + alias="bgpAsnRange", description=("BGP ASN range for auto-allocation " "(minimum: 1 or 1.0, maximum: 4294967295 or 65535.65535)"), default=None + ) + bgp_allow_as_in_num: int = Field(alias="bgpAllowAsInNum", description="Number of occurrences of ASN allowed in the BGP AS-path", default=1) + bgp_max_path: int = Field(alias="bgpMaxPath", description="BGP Maximum Paths", default=4) + bgp_underlay_failure_protect: bool = Field(alias="bgpUnderlayFailureProtect", description="Enable BGP underlay failure protection", default=False) + auto_configure_ebgp_evpn_peering: bool = Field( + alias="autoConfigureEbgpEvpnPeering", description=("Automatically configure eBGP EVPN overlay peering between leaf and spine switches"), default=True + ) + allow_leaf_same_as: bool = Field(alias="allowLeafSameAs", description="Leafs can have same BGP ASN even when AS mode is Multi-AS", default=False) + assign_ipv4_to_loopback0: bool = Field( + alias="assignIpv4ToLoopback0", + description=( + "In an IPv6 routed fabric or VXLAN EVPN fabric with IPv6 underlay, assign IPv4 address " "used for BGP Router ID to the routing loopback interface" + ), + default=True, + ) + evpn: bool = Field(description=("Enable BGP EVPN as the control plane and VXLAN as the data plane for this fabric"), default=True) + route_map_tag: int = Field(alias="routeMapTag", description="Tag for Route Map FABRIC-RMAP-REDIST-SUBNET. 
(Min:0, Max:4294967295)", default=12345) + disable_route_map_tag: bool = Field(alias="disableRouteMapTag", description="No match tag for Route Map FABRIC-RMAP-REDIST-SUBNET", default=False) + leaf_bgp_as: Optional[str] = Field(alias="leafBgpAs", description="Autonomous system number 1-4294967295 | 1-65535[.0-65535]", default=None) + border_bgp_as: Optional[str] = Field(alias="borderBgpAs", description="Autonomous system number 1-4294967295 | 1-65535[.0-65535]", default=None) + super_spine_bgp_as: Optional[str] = Field(alias="superSpineBgpAs", description="Autonomous system number 1-4294967295 | 1-65535[.0-65535]", default=None) + + # Propagated from FabricEbgpModel + name: Optional[str] = Field(description="Fabric name", min_length=1, max_length=64, default="") + + # Network Addressing + bgp_loopback_id: int = Field(alias="bgpLoopbackId", description="Underlay Routing Loopback Id", ge=0, le=1023, default=0) + bgp_loopback_ip_range: str = Field(alias="bgpLoopbackIpRange", description="Typically Loopback0 IP Address Range", default="10.2.0.0/22") + bgp_loopback_ipv6_range: str = Field(alias="bgpLoopbackIpv6Range", description="Typically Loopback0 IPv6 Address Range", default="fd00::a02:0/119") + nve_loopback_id: int = Field( + alias="nveLoopbackId", + description=("Underlay VTEP loopback Id associated with the Network Virtualization Edge (nve) interface"), + ge=0, + le=1023, + default=1, + ) + nve_loopback_ip_range: str = Field(alias="nveLoopbackIpRange", description="Typically Loopback1 IP Address Range", default="10.3.0.0/22") + nve_loopback_ipv6_range: str = Field( + alias="nveLoopbackIpv6Range", description="Typically Loopback1 and Anycast Loopback IPv6 Address Range", default="fd00::a03:0/118" + ) + anycast_loopback_id: int = Field( + alias="anycastLoopbackId", description="Underlay Anycast Loopback Id. 
Used for vPC Peering in VXLANv6 Fabrics", default=10 + ) + anycast_rendezvous_point_ip_range: str = Field( + alias="anycastRendezvousPointIpRange", description="Anycast or Phantom RP IP Address Range", default="10.254.254.0/24" + ) + ipv6_anycast_rendezvous_point_ip_range: str = Field( + alias="ipv6AnycastRendezvousPointIpRange", description="Anycast RP IPv6 Address Range", default="fd00::254:254:0/118" + ) + intra_fabric_subnet_range: str = Field( + alias="intraFabricSubnetRange", description="Address range to assign numbered and peer link SVI IPs", default="10.4.0.0/16" + ) + + # VLAN and VNI Ranges + l2_vni_range: str = Field(alias="l2VniRange", description="Overlay network identifier range (minimum: 1, maximum: 16777214)", default="30000-49000") + l3_vni_range: str = Field(alias="l3VniRange", description="Overlay VRF identifier range (minimum: 1, maximum: 16777214)", default="50000-59000") + network_vlan_range: str = Field( + alias="networkVlanRange", description="Per Switch Overlay Network VLAN Range (minimum: 2, maximum: 4094)", default="2300-2999" + ) + vrf_vlan_range: str = Field(alias="vrfVlanRange", description="Per Switch Overlay VRF VLAN Range (minimum: 2, maximum: 4094)", default="2000-2299") + + # Overlay Configuration + overlay_mode: OverlayModeEnum = Field( + alias="overlayMode", description="Overlay Mode. VRF/Network configuration using config-profile or CLI", default=OverlayModeEnum.CLI + ) + replication_mode: ReplicationModeEnum = Field( + alias="replicationMode", description="Replication Mode for BUM Traffic", default=ReplicationModeEnum.MULTICAST + ) + multicast_group_subnet: str = Field( + alias="multicastGroupSubnet", + description=("Multicast pool prefix between 8 to 30. 
A multicast group ipv4 from this pool " "is used for BUM traffic for each overlay network."), + default="239.1.1.0/25", + ) + auto_generate_multicast_group_address: bool = Field( + alias="autoGenerateMulticastGroupAddress", + description=("Generate a new multicast group address from the multicast pool using a round-robin approach"), + default=False, + ) + underlay_multicast_group_address_limit: UnderlayMulticastGroupAddressLimitEnum = Field( + alias="underlayMulticastGroupAddressLimit", + description=("The maximum supported value is 128 for NX-OS version 10.2(1) or earlier " "and 512 for versions above 10.2(1)"), + default=UnderlayMulticastGroupAddressLimitEnum.V_128, + ) + tenant_routed_multicast: bool = Field(alias="tenantRoutedMulticast", description="For Overlay ipv4 Multicast Support In VXLAN Fabrics", default=False) + tenant_routed_multicast_ipv6: bool = Field( + alias="tenantRoutedMulticastIpv6", description="For Overlay IPv6 Multicast Support In VXLAN Fabrics", default=False + ) + first_hop_redundancy_protocol: FirstHopRedundancyProtocolEnum = Field( + alias="firstHopRedundancyProtocol", description="First Hop Redundancy Protocol HSRP or VRRP", default=FirstHopRedundancyProtocolEnum.HSRP + ) + + # Multicast / Rendezvous Point + rendezvous_point_count: RendezvousPointCountEnum = Field( + alias="rendezvousPointCount", description="Number of spines acting as Rendezvous-Points (RPs)", default=RendezvousPointCountEnum.TWO + ) + rendezvous_point_loopback_id: int = Field(alias="rendezvousPointLoopbackId", description="Rendezvous point loopback Id", default=254) + rendezvous_point_mode: RendezvousPointModeEnum = Field( + alias="rendezvousPointMode", description="Multicast rendezvous point Mode. 
For ipv6 underlay, please use asm only", default=RendezvousPointModeEnum.ASM + ) + phantom_rendezvous_point_loopback_id1: int = Field( + alias="phantomRendezvousPointLoopbackId1", description="Underlay phantom rendezvous point loopback primary Id for PIM Bi-dir deployments", default=2 + ) + phantom_rendezvous_point_loopback_id2: int = Field( + alias="phantomRendezvousPointLoopbackId2", description="Underlay phantom rendezvous point loopback secondary Id for PIM Bi-dir deployments", default=3 + ) + phantom_rendezvous_point_loopback_id3: int = Field( + alias="phantomRendezvousPointLoopbackId3", description="Underlay phantom rendezvous point loopback tertiary Id for PIM Bi-dir deployments", default=4 + ) + phantom_rendezvous_point_loopback_id4: int = Field( + alias="phantomRendezvousPointLoopbackId4", + description=("Underlay phantom rendezvous point loopback quaternary Id for PIM Bi-dir deployments"), + default=5, + ) + l3vni_multicast_group: str = Field( + alias="l3vniMulticastGroup", description="Default Underlay Multicast group IPv4 address assigned for every overlay VRF", default="239.1.1.0" + ) + l3_vni_ipv6_multicast_group: str = Field( + alias="l3VniIpv6MulticastGroup", description="Default Underlay Multicast group IP6 address assigned for every overlay VRF", default="ff1e::" + ) + ipv6_multicast_group_subnet: str = Field( + alias="ipv6MulticastGroupSubnet", description="IPv6 Multicast address with prefix 112 to 128", default="ff1e::/121" + ) + mvpn_vrf_route_import_id: bool = Field( + alias="mvpnVrfRouteImportId", description="Enable MVPN VRI ID Generation For Tenant Routed Multicast With IPv4 Underlay", default=True + ) + mvpn_vrf_route_import_id_range: Optional[str] = Field( + alias="mvpnVrfRouteImportIdRange", + description=( + "MVPN VRI ID (minimum: 1, maximum: 65535) for vPC, applicable when TRM enabled " + "with IPv6 underlay, or mvpnVrfRouteImportId enabled with IPv4 underlay" + ), + default=None, + ) + vrf_route_import_id_reallocation: bool = Field( + 
alias="vrfRouteImportIdReallocation", description="One time VRI ID re-allocation based on 'MVPN VRI ID Range'", default=False + ) + + # Advanced Features + anycast_gateway_mac: str = Field(alias="anycastGatewayMac", description="Shared anycast gateway MAC address for all VTEPs", default="2020.0000.00aa") + target_subnet_mask: int = Field(alias="targetSubnetMask", description="Mask for underlay subnet IP range", ge=24, le=31, default=30) + fabric_mtu: int = Field(alias="fabricMtu", description="Intra Fabric Interface MTU. Must be an even number", ge=1500, le=9216, default=9216) + l2_host_interface_mtu: int = Field( + alias="l2HostInterfaceMtu", description="Layer 2 host interface MTU. Must be an even number", ge=1500, le=9216, default=9216 + ) + l3_vni_no_vlan_default_option: bool = Field( + alias="l3VniNoVlanDefaultOption", + description=( + "L3 VNI configuration without VLAN configuration. This value is propagated on vrf " + "creation as the default value of 'Enable L3VNI w/o VLAN' in vrf" + ), + default=False, + ) + underlay_ipv6: bool = Field(alias="underlayIpv6", description="If not enabled, IPv4 underlay is used", default=False) + static_underlay_ip_allocation: bool = Field( + alias="staticUnderlayIpAllocation", description="Checking this will disable Dynamic Underlay IP Address Allocations", default=False + ) + anycast_border_gateway_advertise_physical_ip: bool = Field( + alias="anycastBorderGatewayAdvertisePhysicalIp", + description=("To advertise Anycast Border Gateway PIP as VTEP. 
" "Effective on MSD fabric 'Recalculate Config'"), + default=False, + ) + + # VPC Configuration + vpc_domain_id_range: str = Field( + alias="vpcDomainIdRange", description="vPC Domain id range (minimum: 1, maximum: 1000) to use for new pairings", default="1-1000" + ) + vpc_peer_link_vlan: str = Field(alias="vpcPeerLinkVlan", description="VLAN range (minimum: 2, maximum: 4094) for vPC Peer Link SVI", default="3600") + vpc_peer_link_enable_native_vlan: bool = Field(alias="vpcPeerLinkEnableNativeVlan", description="Enable VpcPeer Link for Native Vlan", default=False) + vpc_peer_keep_alive_option: VpcPeerKeepAliveOptionEnum = Field( + alias="vpcPeerKeepAliveOption", description="Use vPC Peer Keep Alive with Loopback or Management", default=VpcPeerKeepAliveOptionEnum.MANAGEMENT + ) + vpc_auto_recovery_timer: int = Field(alias="vpcAutoRecoveryTimer", description="vPC auto recovery timer (in seconds)", ge=240, le=3600, default=360) + vpc_delay_restore_timer: int = Field(alias="vpcDelayRestoreTimer", description="vPC delay restore timer (in seconds)", ge=1, le=3600, default=150) + vpc_peer_link_port_channel_id: str = Field( + alias="vpcPeerLinkPortChannelId", description="vPC Peer Link Port Channel ID (minimum: 1, maximum: 4096)", default="500" + ) + vpc_ipv6_neighbor_discovery_sync: bool = Field( + alias="vpcIpv6NeighborDiscoverySync", description="Enable IPv6 ND synchronization between vPC peers", default=True + ) + vpc_layer3_peer_router: bool = Field(alias="vpcLayer3PeerRouter", description="Enable Layer-3 Peer-Router on all Leaf switches", default=True) + vpc_tor_delay_restore_timer: int = Field(alias="vpcTorDelayRestoreTimer", description="vPC delay restore timer for ToR switches (in seconds)", default=30) + fabric_vpc_domain_id: bool = Field( + alias="fabricVpcDomainId", description="Enable the same vPC Domain Id for all vPC Pairs. 
Not Recommended.", default=False + ) + shared_vpc_domain_id: int = Field(alias="sharedVpcDomainId", description="vPC Domain Id to be used on all vPC pairs", default=1) + fabric_vpc_qos: bool = Field(alias="fabricVpcQos", description="Qos on spines for guaranteed delivery of vPC Fabric Peering communication", default=False) + fabric_vpc_qos_policy_name: str = Field( + alias="fabricVpcQosPolicyName", description="Qos Policy name should be same on all spines", default="spine_qos_for_fabric_vpc_peering" + ) + enable_peer_switch: bool = Field(alias="enablePeerSwitch", description="Enable the vPC peer-switch feature on ToR switches", default=False) + + # Per-VRF Loopback + per_vrf_loopback_auto_provision: bool = Field( + alias="perVrfLoopbackAutoProvision", + description=( + "Auto provision an IPv4 loopback on a VTEP on VRF attachment. Note: Enabling this option " + "auto-provisions loopback on existing VRF attachments and also when Edit, QuickAttach, or " + "Multiattach actions are performed. Provisioned loopbacks cannot be deleted until VRFs " + "are unattached." 
+ ), + default=False, + ) + per_vrf_loopback_ip_range: str = Field( + alias="perVrfLoopbackIpRange", description="Prefix pool to assign IPv4 addresses to loopbacks on VTEPs on a per VRF basis", default="10.5.0.0/22" + ) + per_vrf_loopback_auto_provision_ipv6: bool = Field( + alias="perVrfLoopbackAutoProvisionIpv6", description="Auto provision an IPv6 loopback on a VTEP on VRF attachment.", default=False + ) + per_vrf_loopback_ipv6_range: str = Field( + alias="perVrfLoopbackIpv6Range", description="Prefix pool to assign IPv6 addresses to loopbacks on VTEPs on a per VRF basis", default="fd00::a05:0/112" + ) + + # Templates + vrf_template: str = Field(alias="vrfTemplate", description="Default overlay VRF template for leafs", default="Default_VRF_Universal") + network_template: str = Field(alias="networkTemplate", description="Default overlay network template for leafs", default="Default_Network_Universal") + vrf_extension_template: str = Field( + alias="vrfExtensionTemplate", description="Default overlay VRF template for borders", default="Default_VRF_Extension_Universal" + ) + network_extension_template: str = Field( + alias="networkExtensionTemplate", description="Default overlay network template for borders", default="Default_Network_Extension_Universal" + ) + + # Optional Advanced Settings + performance_monitoring: bool = Field( + alias="performanceMonitoring", + description=("If enabled, switch metrics are collected through periodic SNMP polling. " "Alternative to real-time telemetry"), + default=False, + ) + tenant_dhcp: bool = Field(alias="tenantDhcp", description="Enable tenant DHCP", default=True) + advertise_physical_ip: bool = Field( + alias="advertisePhysicalIp", description="For Primary VTEP IP Advertisement As Next-Hop Of Prefix Routes", default=False + ) + advertise_physical_ip_on_border: bool = Field( + alias="advertisePhysicalIpOnBorder", + description=("Enable advertise-pip on vPC borders and border gateways only. 
" "Applicable only when vPC advertise-pip is not enabled"), + default=True, + ) + + # Protocol Settings — BGP + bgp_authentication: bool = Field(alias="bgpAuthentication", description="Enables or disables the BGP authentication", default=False) + bgp_authentication_key_type: BgpAuthenticationKeyTypeEnum = Field( + alias="bgpAuthenticationKeyType", + description="BGP key encryption type: 3 - 3DES, 6 - Cisco type 6, 7 - Cisco type 7", + default=BgpAuthenticationKeyTypeEnum.THREE_DES, + ) + bgp_authentication_key: str = Field(alias="bgpAuthenticationKey", description="Encrypted BGP authentication key based on type", default="") + + # Protocol Settings — BFD + bfd: bool = Field(description="Enable BFD. Valid for IPv4 Underlay only", default=False) + bfd_ibgp: bool = Field(alias="bfdIbgp", description="Enable BFD For iBGP", default=False) + bfd_authentication: bool = Field(alias="bfdAuthentication", description="Enable BFD Authentication. Valid for P2P Interfaces only", default=False) + bfd_authentication_key_id: int = Field(alias="bfdAuthenticationKeyId", description="BFD Authentication Key ID", default=100) + bfd_authentication_key: str = Field(alias="bfdAuthenticationKey", description="Encrypted SHA1 secret value", default="") + + # Protocol Settings — PIM + pim_hello_authentication: bool = Field(alias="pimHelloAuthentication", description="Valid for IPv4 Underlay only", default=False) + pim_hello_authentication_key: str = Field(alias="pimHelloAuthenticationKey", description="3DES Encrypted", default="") + + # Management Settings + nxapi: bool = Field(description="Enable NX-API over HTTPS", default=False) + nxapi_http: bool = Field(alias="nxapiHttp", description="Enable NX-API over HTTP", default=False) + nxapi_https_port: int = Field(alias="nxapiHttpsPort", description="HTTPS port for NX-API", ge=1, le=65535, default=443) + nxapi_http_port: int = Field(alias="nxapiHttpPort", description="HTTP port for NX-API", ge=1, le=65535, default=80) + + # Bootstrap / Day-0 / 
DHCP + day0_bootstrap: bool = Field(alias="day0Bootstrap", description="Automatic IP Assignment For POAP", default=False) + bootstrap_subnet_collection: List[BootstrapSubnetModel] = Field( + alias="bootstrapSubnetCollection", description="List of IPv4 or IPv6 subnets to be used for bootstrap", default_factory=list + ) + local_dhcp_server: bool = Field(alias="localDhcpServer", description="Automatic IP Assignment For POAP From Local DHCP Server", default=False) + dhcp_protocol_version: DhcpProtocolVersionEnum = Field( + alias="dhcpProtocolVersion", description="IP protocol version for Local DHCP Server", default=DhcpProtocolVersionEnum.DHCPV4 + ) + dhcp_start_address: str = Field(alias="dhcpStartAddress", description="DHCP Scope Start Address For Switch POAP", default="") + dhcp_end_address: str = Field(alias="dhcpEndAddress", description="DHCP Scope End Address For Switch POAP", default="") + management_gateway: str = Field(alias="managementGateway", description="Default Gateway For Management VRF On The Switch", default="") + management_ipv4_prefix: int = Field(alias="managementIpv4Prefix", description="Switch Mgmt IP Subnet Prefix if ipv4", default=24) + management_ipv6_prefix: int = Field(alias="managementIpv6Prefix", description="Switch Management IP Subnet Prefix if ipv6", default=64) + + # Netflow Settings + netflow_settings: NetflowSettingsModel = Field(alias="netflowSettings", description="Netflow configuration", default_factory=NetflowSettingsModel) + + # Backup / Restore + real_time_backup: Optional[bool] = Field( + alias="realTimeBackup", description=("Backup hourly only if there is any config deployment since last backup"), default=None + ) + scheduled_backup: Optional[bool] = Field(alias="scheduledBackup", description="Enable backup at the specified time daily", default=None) + scheduled_backup_time: str = Field( + alias="scheduledBackupTime", description=("Time (UTC) in 24 hour format to take a daily backup if enabled (00:00 to 23:59)"), default="" + 
) + + # VRF Lite / Sub-Interface + sub_interface_dot1q_range: str = Field( + alias="subInterfaceDot1qRange", description="Per aggregation dot1q range for VRF-Lite connectivity (minimum: 2, maximum: 4093)", default="2-511" + ) + vrf_lite_auto_config: VrfLiteAutoConfigEnum = Field( + alias="vrfLiteAutoConfig", + description=( + "VRF Lite Inter-Fabric Connection Deployment Options. If 'back2BackAndToExternal' is " + "selected, VRF Lite IFCs are auto created between border devices of two Easy Fabrics, " + "and between border devices in Easy Fabric and edge routers in External Fabric. " + "The IP address is taken from the 'VRF Lite Subnet IP Range' pool." + ), + default=VrfLiteAutoConfigEnum.MANUAL, + ) + vrf_lite_subnet_range: str = Field(alias="vrfLiteSubnetRange", description="Address range to assign P2P Interfabric Connections", default="10.33.0.0/16") + vrf_lite_subnet_target_mask: int = Field(alias="vrfLiteSubnetTargetMask", description="VRF Lite Subnet Mask", default=30) + auto_unique_vrf_lite_ip_prefix: bool = Field( + alias="autoUniqueVrfLiteIpPrefix", + description=( + "When enabled, IP prefix allocated to the VRF LITE IFC is not reused on VRF extension " + "over VRF LITE IFC. Instead, unique IP Subnet is allocated for each VRF extension " + "over VRF LITE IFC." 
+ ), + default=False, + ) + + # Leaf / TOR + leaf_tor_id_range: bool = Field(alias="leafTorIdRange", description="Use specific vPC/Port-channel ID range for leaf-tor pairings", default=False) + leaf_tor_vpc_port_channel_id_range: str = Field( + alias="leafTorVpcPortChannelIdRange", + description=( + "Specify vPC/Port-channel ID range (minimum: 1, maximum: 4096), this range is used " + "for auto-allocating vPC/Port-Channel IDs for leaf-tor pairings" + ), + default="1-499", + ) + allow_vlan_on_leaf_tor_pairing: AllowVlanOnLeafTorPairingEnum = Field( + alias="allowVlanOnLeafTorPairing", + description="Set trunk allowed vlan to 'none' or 'all' for leaf-tor pairing port-channels", + default=AllowVlanOnLeafTorPairingEnum.NONE, + ) + + # DNS / NTP / Syslog Collections + ntp_server_collection: List[str] = Field( + default_factory=lambda: ["string"], alias="ntpServerCollection", description="List of NTP server IPv4/IPv6 addresses and/or hostnames" + ) + ntp_server_vrf_collection: List[str] = Field( + default_factory=lambda: ["string"], + alias="ntpServerVrfCollection", + description=("NTP Server VRFs. One VRF for all NTP servers or a list of VRFs, one per NTP server"), + ) + dns_collection: List[str] = Field(default_factory=lambda: ["5.192.28.174"], alias="dnsCollection", description="List of IPv4 and IPv6 DNS addresses") + dns_vrf_collection: List[str] = Field( + default_factory=lambda: ["string"], + alias="dnsVrfCollection", + description=("DNS Server VRFs. One VRF for all DNS servers or a list of VRFs, one per DNS server"), + ) + syslog_server_collection: List[str] = Field( + default_factory=lambda: ["string"], alias="syslogServerCollection", description="List of Syslog server IPv4/IPv6 addresses and/or hostnames" + ) + syslog_server_vrf_collection: List[str] = Field( + default_factory=lambda: ["string"], + alias="syslogServerVrfCollection", + description=("Syslog Server VRFs. 
One VRF for all Syslog servers or a list of VRFs, " "one per Syslog server"), + ) + syslog_severity_collection: List[int] = Field( + default_factory=lambda: [7], alias="syslogSeverityCollection", description="List of Syslog severity values, one per Syslog server" + ) + + # Extra Config / Pre-Interface Config / AAA / Banner + banner: str = Field( + description=("Message of the Day (motd) banner. Delimiter char (very first char is delimiter char) " "followed by message ending with delimiter"), + default="", + ) + extra_config_leaf: str = Field( + alias="extraConfigLeaf", + description=( + "Additional CLIs as captured from the show running configuration, added after interface " + "configurations for all switches with a VTEP unless they have some spine role" + ), + default="", + ) + extra_config_spine: str = Field( + alias="extraConfigSpine", + description=( + "Additional CLIs as captured from the show running configuration, added after interface " "configurations for all switches with some spine role" + ), + default="", + ) + extra_config_tor: str = Field( + alias="extraConfigTor", + description=("Additional CLIs as captured from the show running configuration, added after interface " "configurations for all ToRs"), + default="", + ) + extra_config_intra_fabric_links: str = Field(alias="extraConfigIntraFabricLinks", description="Additional CLIs for all Intra-Fabric links", default="") + extra_config_aaa: str = Field(alias="extraConfigAaa", description="AAA Configurations", default="") + extra_config_nxos_bootstrap: str = Field( + alias="extraConfigNxosBootstrap", description="Additional CLIs required during device bootup/login e.g. 
AAA/Radius", default="" + ) + aaa: bool = Field(description="Include AAA configs from Manageability tab during device bootup", default=False) + pre_interface_config_leaf: str = Field( + alias="preInterfaceConfigLeaf", + description=( + "Additional CLIs as captured from the show running configuration, added before interface " + "configurations for all switches with a VTEP unless they have some spine role" + ), + default="", + ) + pre_interface_config_spine: str = Field( + alias="preInterfaceConfigSpine", + description=( + "Additional CLIs as captured from the show running configuration, added before interface " "configurations for all switches with some spine role" + ), + default="", + ) + pre_interface_config_tor: str = Field( + alias="preInterfaceConfigTor", + description=("Additional CLIs as captured from the show running configuration, added before interface " "configurations for all ToRs"), + default="", + ) + + # System / Compliance / OAM / Misc + greenfield_debug_flag: GreenfieldDebugFlagEnum = Field( + alias="greenfieldDebugFlag", + description=("Allow switch configuration to be cleared without a reload when " "preserveConfig is set to false"), + default=GreenfieldDebugFlagEnum.DISABLE, + ) + interface_statistics_load_interval: int = Field( + alias="interfaceStatisticsLoadInterval", description="Interface Statistics Load Interval. 
Time in seconds", default=10 + ) + nve_hold_down_timer: int = Field(alias="nveHoldDownTimer", description="NVE Source Inteface HoldDown Time in seconds", default=180) + next_generation_oam: bool = Field( + alias="nextGenerationOAM", + description=("Enable the Next Generation (NG) OAM feature for all switches in the fabric " "to aid in trouble-shooting VXLAN EVPN fabrics"), + default=True, + ) + ngoam_south_bound_loop_detect: bool = Field( + alias="ngoamSouthBoundLoopDetect", description="Enable the Next Generation (NG) OAM southbound loop detection", default=False + ) + ngoam_south_bound_loop_detect_probe_interval: int = Field( + alias="ngoamSouthBoundLoopDetectProbeInterval", + description=("Set Next Generation (NG) OAM southbound loop detection probe interval in seconds."), + default=300, + ) + ngoam_south_bound_loop_detect_recovery_interval: int = Field( + alias="ngoamSouthBoundLoopDetectRecoveryInterval", + description=("Set the Next Generation (NG) OAM southbound loop detection recovery interval in seconds"), + default=600, + ) + strict_config_compliance_mode: bool = Field( + alias="strictConfigComplianceMode", + description=("Enable bi-directional compliance checks to flag additional configs in the running config " "that are not in the intent/expected config"), + default=False, + ) + advanced_ssh_option: bool = Field( + alias="advancedSshOption", + description=("Enable AAA IP Authorization. Enable only, when IP Authorization is enabled " "in the AAA Server"), + default=False, + ) + copp_policy: CoppPolicyEnum = Field( + alias="coppPolicy", + description=("Fabric wide CoPP policy. 
Customized CoPP policy should be provided " "when 'manual' is selected."), + default=CoppPolicyEnum.STRICT, + ) + power_redundancy_mode: PowerRedundancyModeEnum = Field( + alias="powerRedundancyMode", description="Default Power Supply Mode for NX-OS Switches", default=PowerRedundancyModeEnum.REDUNDANT + ) + heartbeat_interval: int = Field(alias="heartbeatInterval", description="XConnect heartbeat interval for periodic link status checks", default=190) + snmp_trap: bool = Field(alias="snmpTrap", description="Configure ND as a receiver for SNMP traps", default=True) + cdp: bool = Field(description="Enable CDP on management interface", default=False) + real_time_interface_statistics_collection: bool = Field( + alias="realTimeInterfaceStatisticsCollection", description="Enable Real Time Interface Statistics Collection. Valid for NX-OS only", default=False + ) + tcam_allocation: bool = Field( + alias="tcamAllocation", description=("TCAM commands are automatically generated for VxLAN and vPC Fabric Peering when Enabled"), default=True + ) + allow_smart_switch_onboarding: bool = Field( + alias="allowSmartSwitchOnboarding", description="Enable onboarding of smart switches to Hypershield for firewall service", default=False + ) + + # Queuing / QoS + default_queuing_policy: bool = Field(alias="defaultQueuingPolicy", description="Enable Default Queuing Policies", default=False) + default_queuing_policy_cloudscale: str = Field( + alias="defaultQueuingPolicyCloudscale", + description=("Queuing Policy for all 92xx, -EX, -FX, -FX2, -FX3, -GX series switches in the fabric"), + default="queuing_policy_default_8q_cloudscale", + ) + default_queuing_policy_r_series: str = Field( + alias="defaultQueuingPolicyRSeries", description="Queueing policy for all Nexus R-series switches", default="queuing_policy_default_r_series" + ) + default_queuing_policy_other: str = Field( + alias="defaultQueuingPolicyOther", description="Queuing Policy for all other switches in the fabric", 
default="queuing_policy_default_other" + ) + aiml_qos: bool = Field( + alias="aimlQos", + description=("Configures QoS and Queuing Policies specific to N9K Cloud Scale (CS) & Silicon One (S1) " "switch fabric for AI network workloads"), + default=False, + ) + aiml_qos_policy: AimlQosPolicyEnum = Field( + alias="aimlQosPolicy", + description=("Queuing Policy based on predominant fabric link speed: 800G / 400G / 100G / 25G. " "User-defined allows for custom configuration."), + default=AimlQosPolicyEnum.V_400G, + ) + roce_v2: str = Field( + alias="roceV2", + description=( + "DSCP for RDMA traffic: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43," + "cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="26", + ) + cnp: str = Field( + description=( + "DSCP value for Congestion Notification: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43," + "cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="48", + ) + wred_min: int = Field(alias="wredMin", description="WRED minimum threshold (in kbytes)", default=950) + wred_max: int = Field(alias="wredMax", description="WRED maximum threshold (in kbytes)", default=3000) + wred_drop_probability: int = Field(alias="wredDropProbability", description="Drop probability %", default=7) + wred_weight: int = Field(alias="wredWeight", description="Influences how quickly WRED reacts to queue depth changes", default=0) + bandwidth_remaining: int = Field(alias="bandwidthRemaining", description="Percentage of remaining bandwidth allocated to AI traffic queues", default=50) + dlb: bool = Field( + description=( + "Enables fabric-level Dynamic Load Balancing (DLB) configuration. 
" "Note: Inter-Switch-Links (ISL) will be configured as DLB Interfaces" + ), + default=False, + ) + dlb_mode: DlbModeEnum = Field( + alias="dlbMode", + description=( + "Select system-wide flowlet, per-packet (packet spraying) or policy driven mixed mode. " + "Note: Mixed mode is supported on Silicon One (S1) platform only." + ), + default=DlbModeEnum.FLOWLET, + ) + dlb_mixed_mode_default: DlbMixedModeDefaultEnum = Field( + alias="dlbMixedModeDefault", description="Default load balancing mode for policy driven mixed mode DLB", default=DlbMixedModeDefaultEnum.ECMP + ) + flowlet_aging: Optional[int] = Field( + alias="flowletAging", + description=( + "Flowlet aging timer in microseconds. Valid range depends on platform: " + "Cloud Scale (CS)=1-2000000 (default 500), Silicon One (S1)=1-1024 (default 256)" + ), + default=None, + ) + flowlet_dscp: str = Field( + alias="flowletDscp", + description=( + "DSCP values for flowlet load balancing: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43," + "cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="", + ) + per_packet_dscp: str = Field( + alias="perPacketDscp", + description=( + "DSCP values for per-packet load balancing: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43," + "cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="", + ) + ai_load_sharing: bool = Field( + alias="aiLoadSharing", description=("Enable IP load sharing using source and destination address for AI workloads"), default=False + ) + priority_flow_control_watch_interval: Optional[int] = Field( + alias="priorityFlowControlWatchInterval", + description=("Acceptable values from 101 to 1000 (milliseconds). 
" "Leave blank for system default (100ms)."), + default=None, + ) + + # PTP + ptp: bool = Field(description="Enable Precision Time Protocol (PTP)", default=False) + ptp_loopback_id: int = Field(alias="ptpLoopbackId", description="Precision Time Protocol Source Loopback Id", default=0) + ptp_domain_id: int = Field(alias="ptpDomainId", description="Multiple Independent PTP Clocking Subdomains on a Single Network", default=0) + + # Private VLAN + private_vlan: bool = Field(alias="privateVlan", description="Enable PVLAN on switches except spines and super spines", default=False) + default_private_vlan_secondary_network_template: str = Field( + alias="defaultPrivateVlanSecondaryNetworkTemplate", description="Default PVLAN secondary network template", default="Pvlan_Secondary_Network" + ) + + # MACsec + macsec: bool = Field( + description=( + "Enable MACsec in the fabric. MACsec fabric parameters are used for configuring " "MACsec on a fabric link if MACsec is enabled on the link." + ), + default=False, + ) + macsec_cipher_suite: MacsecCipherSuiteEnum = Field( + alias="macsecCipherSuite", description="Configure Cipher Suite", default=MacsecCipherSuiteEnum.GCM_AES_XPN_256 + ) + macsec_key_string: str = Field(alias="macsecKeyString", description="MACsec Primary Key String. Cisco Type 7 Encrypted Octet String", default="") + macsec_algorithm: MacsecAlgorithmEnum = Field( + alias="macsecAlgorithm", description="MACsec Primary Cryptographic Algorithm. AES_128_CMAC or AES_256_CMAC", default=MacsecAlgorithmEnum.AES_128_CMAC + ) + macsec_fallback_key_string: str = Field( + alias="macsecFallbackKeyString", description="MACsec Fallback Key String. Cisco Type 7 Encrypted Octet String", default="" + ) + macsec_fallback_algorithm: MacsecAlgorithmEnum = Field( + alias="macsecFallbackAlgorithm", + description="MACsec Fallback Cryptographic Algorithm. 
AES_128_CMAC or AES_256_CMAC", + default=MacsecAlgorithmEnum.AES_128_CMAC, + ) + macsec_report_timer: int = Field(alias="macsecReportTimer", description="MACsec Operational Status periodic report timer in minutes", default=5) + + # Hypershield / Connectivity + enable_dpu_pinning: bool = Field( + alias="enableDpuPinning", description="Enable pinning of VRFs and networks to specific DPUs on smart switches", default=False + ) + connectivity_domain_name: Optional[str] = Field(alias="connectivityDomainName", description="Domain name to connect to Hypershield", default=None) + hypershield_connectivity_proxy_server: Optional[str] = Field( + alias="hypershieldConnectivityProxyServer", + description="IPv4 address, IPv6 address, or DNS name of the proxy server for Hypershield communication", + default=None, + ) + hypershield_connectivity_proxy_server_port: Optional[int] = Field( + alias="hypershieldConnectivityProxyServerPort", description="Proxy port number for communication with Hypershield", default=None + ) + hypershield_connectivity_source_intf: Optional[str] = Field( + alias="hypershieldConnectivitySourceIntf", description="Loopback interface on smart switch for communication with Hypershield", default=None + ) + + @field_validator("bgp_asn") + @classmethod + def validate_bgp_asn(cls, value: Optional[str]) -> Optional[str]: + """ + # Summary + + Validate BGP ASN format and range when provided. + + ## Raises + + - `ValueError` - If value does not match the expected ASN format + """ + if value is None: + return value + if not BGP_ASN_RE.match(value): + raise ValueError(f"Invalid BGP ASN '{value}'. " "Expected a plain integer (1-4294967295) or dotted notation (1-65535.0-65535).") + return value + + @field_validator("site_id") + @classmethod + def validate_site_id(cls, value: str) -> str: + """ + # Summary + + Validate site ID format. 
+ + ## Raises + + - `ValueError` - If site ID is not numeric or outside valid range + """ + if value == "": + return value + if not value.isdigit(): + raise ValueError(f"Site ID must be numeric, got: {value}") + site_id_int = int(value) + if not (1 <= site_id_int <= 281474976710655): + raise ValueError(f"Site ID must be between 1 and 281474976710655, got: {site_id_int}") + return value + + @field_validator("anycast_gateway_mac") + @classmethod + def validate_mac_address(cls, value: str) -> str: + """ + # Summary + + Validate MAC address format. + + ## Raises + + - `ValueError` - If MAC address format is invalid + """ + mac_pattern = re.compile(r"^([0-9a-fA-F]{4}\.){2}[0-9a-fA-F]{4}$") + if not mac_pattern.match(value): + raise ValueError(f"Invalid MAC address format, expected xxxx.xxxx.xxxx, got: {value}") + return value.lower() + + +class FabricEbgpModel(NDBaseModel): + """ + # Summary + + Complete model for creating a new eBGP VXLAN fabric. + + ## Raises + + - `ValueError` - If required fields are missing or invalid + - `TypeError` - If field types don't match expected types + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + identifiers: ClassVar[Optional[List[str]]] = ["fabric_name"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + + # Basic Fabric Properties + category: Literal["fabric"] = Field(description="Resource category", default="fabric") + fabric_name: str = Field(alias="name", description="Fabric name", min_length=1, max_length=64) + location: Optional[LocationModel] = Field(description="Geographic location of the fabric", default=None) + + # License and Operations + license_tier: LicenseTierEnum = Field(alias="licenseTier", description="License tier", default=LicenseTierEnum.PREMIER) + alert_suspend: AlertSuspendEnum = Field(alias="alertSuspend", description="Alert suspension state", 
default=AlertSuspendEnum.DISABLED) + telemetry_collection: bool = Field(alias="telemetryCollection", description="Enable telemetry collection", default=False) + telemetry_collection_type: str = Field(alias="telemetryCollectionType", description="Telemetry collection type", default="outOfBand") + telemetry_streaming_protocol: str = Field(alias="telemetryStreamingProtocol", description="Telemetry streaming protocol", default="ipv4") + telemetry_source_interface: str = Field(alias="telemetrySourceInterface", description="Telemetry source interface", default="") + telemetry_source_vrf: str = Field(alias="telemetrySourceVrf", description="Telemetry source VRF", default="") + security_domain: str = Field(alias="securityDomain", description="Security domain", default="all") + + # Core Management Configuration + management: Optional[VxlanEbgpManagementModel] = Field(description="eBGP VXLAN management configuration", default=None) + + # Optional Advanced Settings + telemetry_settings: Optional[TelemetrySettingsModel] = Field(alias="telemetrySettings", description="Telemetry configuration", default=None) + external_streaming_settings: ExternalStreamingSettingsModel = Field( + alias="externalStreamingSettings", description="External streaming settings", default_factory=ExternalStreamingSettingsModel + ) + + @field_validator("fabric_name") + @classmethod + def validate_fabric_name(cls, value: str) -> str: + """ + # Summary + + Validate fabric name format and characters. + + ## Raises + + - `ValueError` - If name contains invalid characters or format + """ + if not re.match(r"^[a-zA-Z0-9_-]+$", value): + raise ValueError(f"Fabric name can only contain letters, numbers, underscores, and hyphens, got: {value}") + return value + + @model_validator(mode="after") + def validate_fabric_consistency(self) -> "FabricEbgpModel": + """ + # Summary + + Validate consistency between fabric settings and management configuration. 
+ + ## Raises + + - `ValueError` - If fabric settings are inconsistent + """ + if self.management is not None and self.management.type != FabricTypeEnum.VXLAN_EBGP: + raise ValueError(f"Management type must be {FabricTypeEnum.VXLAN_EBGP}") + + # Propagate fabric name to management model + if self.management is not None: + self.management.name = self.fabric_name + + # Propagate BGP ASN to site_id if both are set and site_id is empty + if self.management is not None and self.management.site_id == "" and self.management.bgp_asn is not None: + bgp_asn = self.management.bgp_asn + if "." in bgp_asn: + high, low = bgp_asn.split(".") + self.management.site_id = str(int(high) * 65536 + int(low)) + else: + self.management.site_id = bgp_asn + + # Auto-create default telemetry settings if collection is enabled + if self.telemetry_collection and self.telemetry_settings is None: + self.telemetry_settings = TelemetrySettingsModel() + + return self + + def to_diff_dict(self, **kwargs) -> Dict[str, Any]: + """Export for diff comparison, excluding fields that ND overrides for eBGP fabrics.""" + d = super().to_diff_dict(**kwargs) + # ND always returns nxapiHttp=True for eBGP fabrics regardless of the configured value, + # so exclude it from diff comparison to prevent a persistent false-positive diff. 
+ if "management" in d: + d["management"].pop("nxapiHttp", None) + return d + + @classmethod + def get_argument_spec(cls) -> Dict: + return dict( + state={ + "type": "str", + "default": "merged", + "choices": ["merged", "replaced", "deleted", "overridden"], + }, + config={"required": False, "type": "list", "elements": "dict"}, + ) + + +# Export all models for external use +__all__ = [ + "VxlanEbgpManagementModel", + "FabricEbgpModel", + "FabricTypeEnum", + "AlertSuspendEnum", + "LicenseTierEnum", + "ReplicationModeEnum", + "OverlayModeEnum", + "BgpAsModeEnum", + "FirstHopRedundancyProtocolEnum", + "VpcPeerKeepAliveOptionEnum", + "CoppPolicyEnum", + "GreenfieldDebugFlagEnum", +] diff --git a/plugins/module_utils/models/manage_fabric/manage_fabric_external.py b/plugins/module_utils/models/manage_fabric/manage_fabric_external.py new file mode 100644 index 00000000..893c908a --- /dev/null +++ b/plugins/module_utils/models/manage_fabric/manage_fabric_external.py @@ -0,0 +1,569 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re +from typing import List, Dict, Optional, ClassVar, Literal + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + ConfigDict, + Field, + field_validator, + model_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.enums import ( + FabricTypeEnum, + AlertSuspendEnum, + LicenseTierEnum, + CoppPolicyEnum, + DhcpProtocolVersionEnum, + PowerRedundancyModeEnum, + TelemetryCollectionTypeEnum, + TelemetryStreamingProtocolEnum, +) +from 
ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_common import ( + BGP_ASN_RE, + LocationModel, + NetflowExporterModel, + NetflowRecordModel, + NetflowMonitorModel, + NetflowSettingsModel, + BootstrapSubnetModel, + TelemetryFlowCollectionModel, + TelemetryMicroburstModel, + TelemetryAnalysisSettingsModel, + TelemetryEnergyManagementModel, + TelemetrySettingsModel, + ExternalStreamingSettingsModel, +) + +""" +# Comprehensive Pydantic models for External Connectivity fabric management via Nexus Dashboard + +This module provides comprehensive Pydantic models for creating, updating, and deleting +External Connectivity fabrics through the Nexus Dashboard Fabric Controller (NDFC) API. + +## Models Overview + +- `ExternalConnectivityManagementModel` - External Connectivity specific management settings +- `FabricExternalConnectivityModel` - Complete fabric creation model + +## Usage + +```python +# Create a new External Connectivity fabric +fabric_data = { + "name": "MyExtFabric", + "location": {"latitude": 37.7749, "longitude": -122.4194}, + "management": { + "type": "externalConnectivity", + "bgp_asn": "65001", + } +} +fabric = FabricExternalConnectivityModel(**fabric_data) +``` +""" + + +class ExternalConnectivityManagementModel(NDNestedModel): + """ + # Summary + + Comprehensive External Connectivity fabric management configuration. + + This model contains all settings specific to External Connectivity fabric types including + BGP configuration, bootstrap settings, and advanced features. 
+ + ## Raises + + - `ValueError` - If BGP ASN or IP ranges are invalid + - `TypeError` - If required string fields are not provided + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + # Fabric Type (required for discriminated union) + type: Literal[FabricTypeEnum.EXTERNAL_CONNECTIVITY] = Field(description="Fabric management type", default=FabricTypeEnum.EXTERNAL_CONNECTIVITY) + + # Core Configuration + bgp_asn: str = Field( + alias="bgpAsn", + description="Autonomous system number 1-4294967295 | 1-65535[.0-65535]", + ) + + # Name under management section is optional for backward compatibility + name: Optional[str] = Field(description="Fabric name", min_length=1, max_length=64, default="") + + # AAA + aaa: bool = Field( + description="Include AAA configs from Advanced tab during device bootup", + default=False, + ) + + # SSH + advanced_ssh_option: bool = Field( + alias="advancedSshOption", + description="Enable only, when IP Authorization is enabled in the AAA Server", + default=False, + ) + + # Loopback + allow_same_loopback_ip_on_switches: bool = Field( + alias="allowSameLoopbackIpOnSwitches", + description=("Allow the same loopback IP address to be configured on multiple" " switches (e.g. RP loopback IP)"), + default=False, + ) + + # Smart Switch + allow_smart_switch_onboarding: bool = Field( + alias="allowSmartSwitchOnboarding", + description=("Enable onboarding of smart switches to Hypershield" " for firewall service"), + default=False, + ) + + # Bootstrap Subnet Collection + bootstrap_subnet_collection: List[BootstrapSubnetModel] = Field( + alias="bootstrapSubnetCollection", + description="List of IPv4 or IPv6 subnets to be used for bootstrap", + default_factory=list, + ) + + # CDP + cdp: bool = Field(description="Enable CDP on management interface", default=False) + + # CoPP Policy + copp_policy: CoppPolicyEnum = Field( + alias="coppPolicy", + description=("Fabric wide CoPP policy. 
Customized CoPP policy should be" " provided when 'manual' is selected."), + default=CoppPolicyEnum.MANUAL, + ) + + # BGP Configuration + create_bgp_config: bool = Field( + alias="createBgpConfig", + description="Generate BGP configuration for core and edge routers", + default=True, + ) + + # Bootstrap Settings + day0_bootstrap: bool = Field( + alias="day0Bootstrap", + description="Support day 0 touchless switch bringup", + default=False, + ) + day0_plug_and_play: bool = Field( + alias="day0PlugAndPlay", + description="Enable Plug n Play for Catalyst 9000 switches", + default=False, + ) + + # DHCP + dhcp_end_address: str = Field( + alias="dhcpEndAddress", + description="DHCP Scope End Address For Switch POAP", + default="", + ) + dhcp_protocol_version: DhcpProtocolVersionEnum = Field( + alias="dhcpProtocolVersion", + description="IP protocol version for Local DHCP Server", + default=DhcpProtocolVersionEnum.DHCPV4, + ) + dhcp_start_address: str = Field( + alias="dhcpStartAddress", + description="DHCP Scope Start Address For Switch POAP", + default="", + ) + + # DNS + dns_collection: List[str] = Field( + alias="dnsCollection", + description="List of IPv4 and IPv6 DNS addresses", + default_factory=list, + ) + dns_vrf_collection: List[str] = Field( + alias="dnsVrfCollection", + description=("DNS Server VRFs. 
One VRF for all DNS servers or a list of VRFs," " one per DNS server"), + default_factory=list, + ) + + # Domain + domain_name: str = Field( + alias="domainName", + description="Domain name for DHCP server PnP block", + default="", + ) + + # DPU Pinning + enable_dpu_pinning: bool = Field( + alias="enableDpuPinning", + description=("Enable pinning of VRFs and networks to specific DPUs" " on smart switches"), + default=False, + ) + + # Extra Config + extra_config_aaa: str = Field( + alias="extraConfigAaa", + description="Additional CLIs for AAA Configuration", + default="", + ) + extra_config_fabric: str = Field( + alias="extraConfigFabric", + description="Additional CLIs for all switches", + default="", + ) + extra_config_nxos_bootstrap: str = Field( + alias="extraConfigNxosBootstrap", + description=("Additional CLIs required during device bootup/login" " e.g. AAA/Radius (NX-OS)"), + default="", + ) + extra_config_xe_bootstrap: str = Field( + alias="extraConfigXeBootstrap", + description=("Additional CLIs required during device bootup/login" " e.g. 
AAA/Radius (IOS-XE)"), + default="", + ) + + # Inband Management + inband_day0_bootstrap: bool = Field( + alias="inbandDay0Bootstrap", + description="Support day 0 touchless switch bringup via inband management", + default=False, + ) + inband_management: bool = Field( + alias="inbandManagement", + description=("Import switches with reachability over the switch" " front-panel ports"), + default=False, + ) + + # Interface Statistics + interface_statistics_load_interval: int = Field( + alias="interfaceStatisticsLoadInterval", + description="Interface Statistics Load Interval Time in seconds", + default=10, + ) + + # Local DHCP Server + local_dhcp_server: bool = Field( + alias="localDhcpServer", + description="Automatic IP Assignment For POAP from Local DHCP Server", + default=False, + ) + + # Management + management_gateway: str = Field( + alias="managementGateway", + description="Default Gateway For Management VRF On The Switch", + default="", + ) + management_ipv4_prefix: int = Field( + alias="managementIpv4Prefix", + description="Switch Mgmt IP Subnet Prefix if ipv4", + default=24, + ) + management_ipv6_prefix: int = Field( + alias="managementIpv6Prefix", + description="Switch Management IP Subnet Prefix if ipv6", + default=64, + ) + + # Monitored Mode + monitored_mode: bool = Field( + alias="monitoredMode", + description=("If enabled, fabric is only monitored." 
" No configuration will be deployed"), + default=False, + ) + + # MPLS Handoff + mpls_handoff: bool = Field( + alias="mplsHandoff", + description="Enable MPLS Handoff", + default=False, + ) + mpls_loopback_identifier: Optional[int] = Field( + alias="mplsLoopbackIdentifier", + description="Underlay MPLS Loopback Identifier", + default=None, + ) + mpls_loopback_ip_range: str = Field( + alias="mplsLoopbackIpRange", + description="MPLS Loopback IP Address Range", + default="10.102.0.0/25", + ) + + # Netflow Settings + netflow_settings: NetflowSettingsModel = Field( + alias="netflowSettings", + description="Settings associated with netflow", + default_factory=NetflowSettingsModel, + ) + + # NX-API Settings + nxapi: bool = Field(description="Enable NX-API over HTTPS", default=False) + nxapi_http: bool = Field(alias="nxapiHttp", description="Enable NX-API over HTTP", default=False) + nxapi_http_port: int = Field(alias="nxapiHttpPort", description="HTTP port for NX-API", ge=1, le=65535, default=80) + nxapi_https_port: int = Field(alias="nxapiHttpsPort", description="HTTPS port for NX-API", ge=1, le=65535, default=443) + + # Performance Monitoring + performance_monitoring: bool = Field( + alias="performanceMonitoring", + description=("If enabled, switch metrics are collected through periodic SNMP" " polling. 
Alternative to real-time telemetry"), + default=False, + ) + + # Power Redundancy + power_redundancy_mode: PowerRedundancyModeEnum = Field( + alias="powerRedundancyMode", + description="Default Power Supply Mode for NX-OS Switches", + default=PowerRedundancyModeEnum.REDUNDANT, + ) + + # PTP + ptp: bool = Field(description="Enable Precision Time Protocol (PTP)", default=False) + ptp_domain_id: int = Field( + alias="ptpDomainId", + description=("Multiple Independent PTP Clocking Subdomains" " on a Single Network"), + default=0, + ) + ptp_loopback_id: int = Field( + alias="ptpLoopbackId", + description="Precision Time Protocol Source Loopback Id", + default=0, + ) + + # Backup / Restore + real_time_backup: Optional[bool] = Field( + alias="realTimeBackup", + description=("Hourly Fabric Backup only if there is any config deployment" " since last backup"), + default=None, + ) + + # Interface Statistics Collection + real_time_interface_statistics_collection: bool = Field( + alias="realTimeInterfaceStatisticsCollection", + description=("Enable Real Time Interface Statistics Collection." 
" Valid for NX-OS only"), + default=False, + ) + + # Scheduled Backup + scheduled_backup: Optional[bool] = Field( + alias="scheduledBackup", + description="Enable backup at the specified time daily", + default=None, + ) + scheduled_backup_time: str = Field( + alias="scheduledBackupTime", + description=("Time (UTC) in 24 hour format to take a daily backup" " if enabled (00:00 to 23:59)"), + default="", + ) + + # SNMP + snmp_trap: bool = Field( + alias="snmpTrap", + description="Configure Nexus Dashboard as a receiver for SNMP traps", + default=True, + ) + + # Sub-Interface + sub_interface_dot1q_range: str = Field( + alias="subInterfaceDot1qRange", + description=("Per aggregation dot1q range for VRF-Lite connectivity" " (minimum: 2, maximum: 4093)"), + default="2-511", + ) + + # Hypershield / Connectivity + connectivity_domain_name: Optional[str] = Field(alias="connectivityDomainName", description="Domain name to connect to Hypershield", default=None) + hypershield_connectivity_proxy_server: Optional[str] = Field( + alias="hypershieldConnectivityProxyServer", + description="IPv4 address, IPv6 address, or DNS name of the proxy server for Hypershield communication", + default=None, + ) + hypershield_connectivity_proxy_server_port: Optional[int] = Field( + alias="hypershieldConnectivityProxyServerPort", description="Proxy port number for communication with Hypershield", default=None + ) + hypershield_connectivity_source_intf: Optional[str] = Field( + alias="hypershieldConnectivitySourceIntf", description="Loopback interface on smart switch for communication with Hypershield", default=None + ) + + @field_validator("bgp_asn") + @classmethod + def validate_bgp_asn(cls, value: str) -> str: + """ + # Summary + + Validate BGP ASN format and range. + + ## Description + + Accepts either a plain integer ASN (1-4294967295) or dotted four-byte + ASN notation in the form ``MMMM.NNNN`` where both parts are in the + range 1-65535 / 0-65535 respectively. 
+ + ## Raises + + - `ValueError` - If the value does not match the expected ASN format + """ + if not BGP_ASN_RE.match(value): + raise ValueError(f"Invalid BGP ASN '{value}'. " "Expected a plain integer (1-4294967295) or dotted notation (1-65535.0-65535).") + return value + + +class FabricExternalConnectivityModel(NDBaseModel): + """ + # Summary + + Complete model for creating a new External Connectivity fabric. + + This model combines all necessary components for fabric creation including + basic fabric properties, management settings, telemetry, and streaming configuration. + + ## Raises + + - `ValueError` - If required fields are missing or invalid + - `TypeError` - If field types don't match expected types + """ + + model_config = ConfigDict( + str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow" # Allow extra fields from API responses + ) + + identifiers: ClassVar[Optional[List[str]]] = ["fabric_name"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + + # Basic Fabric Properties + category: Literal["fabric"] = Field(description="Resource category", default="fabric") + fabric_name: str = Field(alias="name", description="Fabric name", min_length=1, max_length=64) + location: Optional[LocationModel] = Field(description="Geographic location of the fabric", default=None) + + # License and Operations + license_tier: LicenseTierEnum = Field( + alias="licenseTier", + description="License Tier value of a fabric.", + default=LicenseTierEnum.PREMIER, + ) + alert_suspend: AlertSuspendEnum = Field( + alias="alertSuspend", + description="Alert Suspend state configured on the fabric", + default=AlertSuspendEnum.DISABLED, + ) + telemetry_collection: bool = Field(alias="telemetryCollection", description="Enable telemetry collection", default=False) + telemetry_collection_type: TelemetryCollectionTypeEnum = Field( + alias="telemetryCollectionType", + description="Telemetry 
collection method.", + default=TelemetryCollectionTypeEnum.OUT_OF_BAND, + ) + telemetry_streaming_protocol: TelemetryStreamingProtocolEnum = Field( + alias="telemetryStreamingProtocol", + description="Telemetry Streaming Protocol.", + default=TelemetryStreamingProtocolEnum.IPV4, + ) + telemetry_source_interface: str = Field( + alias="telemetrySourceInterface", + description=("Telemetry Source Interface (VLAN id or Loopback id) only valid" " if Telemetry Collection is set to inBand"), + default="", + ) + telemetry_source_vrf: str = Field( + alias="telemetrySourceVrf", + description=("VRF over which telemetry is streamed, valid only if telemetry" " collection is set to inband"), + default="", + ) + security_domain: str = Field( + alias="securityDomain", + description="Security Domain associated with the fabric", + default="all", + ) + + # Core Management Configuration + management: Optional[ExternalConnectivityManagementModel] = Field(description="External Connectivity management configuration", default=None) + + # Optional Advanced Settings + telemetry_settings: Optional[TelemetrySettingsModel] = Field(alias="telemetrySettings", description="Telemetry configuration", default=None) + external_streaming_settings: ExternalStreamingSettingsModel = Field( + alias="externalStreamingSettings", description="External streaming settings", default_factory=ExternalStreamingSettingsModel + ) + + @field_validator("fabric_name") + @classmethod + def validate_fabric_name(cls, value: str) -> str: + """ + # Summary + + Validate fabric name format and characters. 
+ + ## Raises + + - `ValueError` - If name contains invalid characters or format + """ + if not re.match(r"^[a-zA-Z0-9_-]+$", value): + raise ValueError(f"Fabric name can only contain letters, numbers, underscores, and hyphens, got: {value}") + + return value + + @model_validator(mode="after") + def validate_fabric_consistency(self) -> "FabricExternalConnectivityModel": + """ + # Summary + + Validate consistency between fabric settings and management configuration. + + ## Raises + + - `ValueError` - If fabric settings are inconsistent + """ + # Ensure management type matches model type + if self.management is not None and self.management.type != FabricTypeEnum.EXTERNAL_CONNECTIVITY: + raise ValueError(f"Management type must be {FabricTypeEnum.EXTERNAL_CONNECTIVITY}") + + # Propagate fabric name to management model + if self.management is not None: + self.management.name = self.fabric_name + + # Validate telemetry consistency + if self.telemetry_collection and self.telemetry_settings is None: + # Auto-create default telemetry settings if collection is enabled + self.telemetry_settings = TelemetrySettingsModel() + + return self + + # TODO: to generate from Fields (low priority) + @classmethod + def get_argument_spec(cls) -> Dict: + return dict( + state={ + "type": "str", + "default": "merged", + "choices": ["merged", "replaced", "deleted", "overridden"], + }, + config={"required": False, "type": "list", "elements": "dict"}, + ) + + +# Export all models for external use +__all__ = [ + "LocationModel", + "NetflowExporterModel", + "NetflowRecordModel", + "NetflowMonitorModel", + "NetflowSettingsModel", + "BootstrapSubnetModel", + "TelemetryFlowCollectionModel", + "TelemetryMicroburstModel", + "TelemetryAnalysisSettingsModel", + "TelemetryEnergyManagementModel", + "TelemetrySettingsModel", + "ExternalStreamingSettingsModel", + "ExternalConnectivityManagementModel", + "FabricExternalConnectivityModel", + "FabricTypeEnum", + "AlertSuspendEnum", + "LicenseTierEnum", + 
"CoppPolicyEnum", + "DhcpProtocolVersionEnum", + "PowerRedundancyModeEnum", +] diff --git a/plugins/module_utils/models/manage_fabric/manage_fabric_ibgp.py b/plugins/module_utils/models/manage_fabric/manage_fabric_ibgp.py new file mode 100644 index 00000000..c2ecb713 --- /dev/null +++ b/plugins/module_utils/models/manage_fabric/manage_fabric_ibgp.py @@ -0,0 +1,1195 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re + +# from datetime import datetime +from typing import List, Dict, Optional, ClassVar, Literal + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import NDNestedModel +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + ConfigDict, + Field, + field_validator, + model_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.enums import ( + FabricTypeEnum, + AlertSuspendEnum, + LicenseTierEnum, + OverlayModeEnum, + ReplicationModeEnum, + LinkStateRoutingProtocolEnum, + CoppPolicyEnum, + FabricInterfaceTypeEnum, + GreenfieldDebugFlagEnum, + IsisLevelEnum, + SecurityGroupStatusEnum, + StpRootOptionEnum, + VpcPeerKeepAliveOptionEnum, + AimlQosPolicyEnum, + AllowVlanOnLeafTorPairingEnum, + BgpAuthenticationKeyTypeEnum, + DhcpProtocolVersionEnum, + DlbMixedModeDefaultEnum, + DlbModeEnum, + MacsecAlgorithmEnum, + MacsecCipherSuiteEnum, + PowerRedundancyModeEnum, + RendezvousPointCountEnum, + RendezvousPointModeEnum, + RouteReflectorCountEnum, + UnderlayMulticastGroupAddressLimitEnum, + VrfLiteAutoConfigEnum, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_common import ( + BGP_ASN_RE, + LocationModel, + 
NetflowExporterModel, + NetflowRecordModel, + NetflowMonitorModel, + NetflowSettingsModel, + BootstrapSubnetModel, + TelemetryFlowCollectionModel, + TelemetryMicroburstModel, + TelemetryAnalysisSettingsModel, + TelemetryEnergyManagementModel, + TelemetrySettingsModel, + ExternalStreamingSettingsModel, +) + +""" +# Comprehensive Pydantic models for iBGP VXLAN fabric management via Nexus Dashboard + +This module provides comprehensive Pydantic models for creating, updating, and deleting +iBGP VXLAN fabrics through the Nexus Dashboard Fabric Controller (NDFC) API. + +## Models Overview + +- `LocationModel` - Geographic location coordinates +- `NetflowExporterModel` - Netflow exporter configuration +- `NetflowRecordModel` - Netflow record configuration +- `NetflowMonitorModel` - Netflow monitor configuration +- `NetflowSettingsModel` - Complete netflow settings +- `BootstrapSubnetModel` - Bootstrap subnet configuration +- `TelemetryFlowCollectionModel` - Telemetry flow collection settings +- `TelemetrySettingsModel` - Complete telemetry configuration +- `ExternalStreamingSettingsModel` - External streaming configuration +- `VxlanIbgpManagementModel` - iBGP VXLAN specific management settings +- `FabricModel` - Complete fabric creation model +- `FabricDeleteModel` - Fabric deletion model + +## Usage + +```python +# Create a new iBGP VXLAN fabric +fabric_data = { + "name": "MyFabric", + "location": {"latitude": 37.7749, "longitude": -122.4194}, + "management": { + "type": "vxlanIbgp", + "bgp_asn": "65001", + "site_id": "65001" + } +} +fabric = FabricModel(**fabric_data) +``` +""" + + +class VxlanIbgpManagementModel(NDNestedModel): + """ + # Summary + + Comprehensive iBGP VXLAN fabric management configuration. + + This model contains all settings specific to iBGP VXLAN fabric types including + overlay configuration, underlay routing, multicast settings, and advanced features. 
+ + ## Raises + + - `ValueError` - If BGP ASN, VLAN ranges, or IP ranges are invalid + - `TypeError` - If required string fields are not provided + """ + + model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow") + + # Fabric Type (required for discriminated union) + type: Literal[FabricTypeEnum.VXLAN_IBGP] = Field(description="Type of the fabric", default=FabricTypeEnum.VXLAN_IBGP) + + # Core iBGP Configuration + bgp_asn: str = Field(alias="bgpAsn", description="Autonomous system number 1-4294967295 | 1-65535[.0-65535]") + site_id: Optional[str] = Field(alias="siteId", description="For EVPN Multi-Site Support. Defaults to Fabric ASN", default="") + + # Name under management section is optional for backward compatibility, but if provided must be non-empty string + name: Optional[str] = Field(description="Fabric name", min_length=1, max_length=64, default="") + # border_count: Optional[int] = Field(alias="borderCount", description="Number of border switches", ge=0, le=32, default=0) + # breakout_spine_interfaces: Optional[bool] = Field(alias="breakoutSpineInterfaces", description="Enable breakout spine interfaces", default=False) + # designer_use_robot_password: Optional[bool] = Field(alias="designerUseRobotPassword", description="Use robot password for designer", default=False) + # leaf_count: Optional[int] = Field(alias="leafCount", description="Number of leaf switches", ge=1, le=128, default=1) + # spine_count: Optional[int] = Field(alias="spineCount", description="Number of spine switches", ge=1, le=32, default=1) + # vrf_lite_ipv6_subnet_range: Optional[str] = Field(alias="vrfLiteIpv6SubnetRange", description="VRF Lite IPv6 subnet range", default="fd00::a33:0/112") + # vrf_lite_ipv6_subnet_target_mask: Optional[int] = Field( + # alias="vrfLiteIpv6SubnetTargetMask", + # description="VRF Lite IPv6 subnet target mask", ge=112, le=128, default=126) + + # Network Addressing + bgp_loopback_ip_range: str = 
Field(alias="bgpLoopbackIpRange", description="Typically Loopback0 IP Address Range", default="10.2.0.0/22") + nve_loopback_ip_range: str = Field(alias="nveLoopbackIpRange", description="Typically Loopback1 IP Address Range", default="10.3.0.0/22") + anycast_rendezvous_point_ip_range: str = Field( + alias="anycastRendezvousPointIpRange", description="Anycast or Phantom RP IP Address Range", default="10.254.254.0/24" + ) + intra_fabric_subnet_range: str = Field( + alias="intraFabricSubnetRange", description="Address range to assign numbered and peer link SVI IPs", default="10.4.0.0/16" + ) + + # VLAN and VNI Ranges + l2_vni_range: str = Field(alias="l2VniRange", description="Overlay network identifier range (minimum: 1, maximum: 16777214)", default="30000-49000") + l3_vni_range: str = Field(alias="l3VniRange", description="Overlay VRF identifier range (minimum: 1, maximum: 16777214)", default="50000-59000") + network_vlan_range: str = Field( + alias="networkVlanRange", description="Per Switch Overlay Network VLAN Range (minimum: 2, maximum: 4094)", default="2300-2999" + ) + vrf_vlan_range: str = Field(alias="vrfVlanRange", description="Per Switch Overlay VRF VLAN Range (minimum: 2, maximum: 4094)", default="2000-2299") + + # Overlay Configuration + overlay_mode: OverlayModeEnum = Field( + alias="overlayMode", description="Overlay Mode. VRF/Network configuration using config-profile or CLI", default=OverlayModeEnum.CLI + ) + replication_mode: ReplicationModeEnum = Field( + alias="replicationMode", description="Replication Mode for BUM Traffic", default=ReplicationModeEnum.MULTICAST + ) + multicast_group_subnet: str = Field( + alias="multicastGroupSubnet", + description=("Multicast pool prefix between 8 to 30. 
A multicast group ipv4 from this pool is used for BUM traffic for " "each overlay network."), + default="239.1.1.0/25", + ) + auto_generate_multicast_group_address: bool = Field( + alias="autoGenerateMulticastGroupAddress", + description="Generate a new multicast group address from the multicast pool using a round-robin approach", + default=False, + ) + underlay_multicast_group_address_limit: UnderlayMulticastGroupAddressLimitEnum = Field( + alias="underlayMulticastGroupAddressLimit", + description=("The maximum supported value is 128 for NX-OS version 10.2(1) or earlier " "and 512 for versions above 10.2(1)"), + default=UnderlayMulticastGroupAddressLimitEnum.V_128, + ) + tenant_routed_multicast: bool = Field(alias="tenantRoutedMulticast", description="For Overlay ipv4 Multicast Support In VXLAN Fabrics", default=False) + + # Underlay Configuration + link_state_routing_protocol: LinkStateRoutingProtocolEnum = Field( + alias="linkStateRoutingProtocol", description="Underlay Routing Protocol. Used for Spine-Leaf Connectivity", default=LinkStateRoutingProtocolEnum.OSPF + ) + ospf_area_id: str = Field(alias="ospfAreaId", description="OSPF Area Id in IP address format", default="0.0.0.0") + fabric_interface_type: FabricInterfaceTypeEnum = Field( + alias="fabricInterfaceType", description="Numbered(Point-to-Point) or unNumbered", default=FabricInterfaceTypeEnum.P2P + ) + + # Advanced Features + target_subnet_mask: int = Field(alias="targetSubnetMask", description="Mask for underlay subnet IP range", ge=24, le=31, default=30) + anycast_gateway_mac: str = Field(alias="anycastGatewayMac", description="Shared anycast gateway MAC address for all VTEPs", default="2020.0000.00aa") + fabric_mtu: int = Field(alias="fabricMtu", description="Intra Fabric Interface MTU. Must be an even number", ge=1500, le=9216, default=9216) + l2_host_interface_mtu: int = Field( + alias="l2HostInterfaceMtu", description="Layer 2 host interface MTU. 
Must be an even number", ge=1500, le=9216, default=9216 + ) + + # VPC Configuration + vpc_domain_id_range: str = Field( + alias="vpcDomainIdRange", description="vPC Domain id range (minimum: 1, maximum: 1000) to use for new pairings", default="1-1000" + ) + vpc_peer_link_vlan: str = Field(alias="vpcPeerLinkVlan", description="VLAN range (minimum: 2, maximum: 4094) for vPC Peer Link SVI", default="3600") + vpc_peer_link_enable_native_vlan: bool = Field(alias="vpcPeerLinkEnableNativeVlan", description="Enable VpcPeer Link for Native Vlan", default=False) + vpc_peer_keep_alive_option: VpcPeerKeepAliveOptionEnum = Field( + alias="vpcPeerKeepAliveOption", description="Use vPC Peer Keep Alive with Loopback or Management", default=VpcPeerKeepAliveOptionEnum.MANAGEMENT + ) + vpc_auto_recovery_timer: int = Field(alias="vpcAutoRecoveryTimer", description="vPC auto recovery timer (in seconds)", ge=240, le=3600, default=360) + vpc_delay_restore_timer: int = Field(alias="vpcDelayRestoreTimer", description="vPC delay restore timer (in seconds)", ge=1, le=3600, default=150) + + # Loopback Configuration + bgp_loopback_id: int = Field(alias="bgpLoopbackId", description="Underlay Routing Loopback Id", ge=0, le=1023, default=0) + nve_loopback_id: int = Field( + alias="nveLoopbackId", + description="Underlay VTEP loopback Id associated with the Network Virtualization Edge (nve) interface", + ge=0, + le=1023, + default=1, + ) + route_reflector_count: RouteReflectorCountEnum = Field( + alias="routeReflectorCount", description="Number of spines acting as Route-Reflectors", default=RouteReflectorCountEnum.TWO + ) + + # Templates + vrf_template: str = Field(alias="vrfTemplate", description="Default overlay VRF template for leafs", default="Default_VRF_Universal") + network_template: str = Field(alias="networkTemplate", description="Default overlay network template for leafs", default="Default_Network_Universal") + vrf_extension_template: str = Field( + alias="vrfExtensionTemplate", 
description="Default overlay VRF template for borders", default="Default_VRF_Extension_Universal" + ) + network_extension_template: str = Field( + alias="networkExtensionTemplate", description="Default overlay network template for borders", default="Default_Network_Extension_Universal" + ) + + # Optional Advanced Settings + performance_monitoring: bool = Field( + alias="performanceMonitoring", + description=("If enabled, switch metrics are collected through periodic SNMP polling. " "Alternative to real-time telemetry"), + default=False, + ) + tenant_dhcp: bool = Field(alias="tenantDhcp", description="Enable Tenant DHCP", default=True) + advertise_physical_ip: bool = Field( + alias="advertisePhysicalIp", description="For Primary VTEP IP Advertisement As Next-Hop Of Prefix Routes", default=False + ) + advertise_physical_ip_on_border: bool = Field( + alias="advertisePhysicalIpOnBorder", + description=("Enable advertise-pip on vPC borders and border gateways only. Applicable only when vPC advertise-pip is " "not enabled"), + default=True, + ) + + # Protocol Settings + bgp_authentication: bool = Field(alias="bgpAuthentication", description="Enables or disables the BGP authentication", default=False) + bgp_authentication_key_type: BgpAuthenticationKeyTypeEnum = Field( + alias="bgpAuthenticationKeyType", + description="BGP key encryption type: 3 - 3DES, 6 - Cisco type 6, 7 - Cisco type 7", + default=BgpAuthenticationKeyTypeEnum.THREE_DES, + ) + bfd: bool = Field(description="Enable BFD. 
Valid for IPv4 Underlay only", default=False) + bfd_ibgp: bool = Field(alias="bfdIbgp", description="Enable BFD For iBGP", default=False) + + # Management Settings + nxapi: bool = Field(description="Enable NX-API over HTTPS", default=False) + nxapi_http: bool = Field(alias="nxapiHttp", description="Enable NX-API over HTTP", default=False) + nxapi_https_port: int = Field(alias="nxapiHttpsPort", description="HTTPS port for NX-API", ge=1, le=65535, default=443) + nxapi_http_port: int = Field(alias="nxapiHttpPort", description="HTTP port for NX-API", ge=1, le=65535, default=80) + + # Bootstrap Settings + day0_bootstrap: bool = Field(alias="day0Bootstrap", description="Automatic IP Assignment For POAP", default=False) + bootstrap_subnet_collection: List[BootstrapSubnetModel] = Field( + alias="bootstrapSubnetCollection", description="List of IPv4 or IPv6 subnets to be used for bootstrap", default_factory=list + ) + + # Netflow Settings + netflow_settings: NetflowSettingsModel = Field( + alias="netflowSettings", description="Settings associated with netflow", default_factory=NetflowSettingsModel + ) + + # Multicast Settings + rendezvous_point_count: RendezvousPointCountEnum = Field( + alias="rendezvousPointCount", description="Number of spines acting as Rendezvous-Points (RPs)", default=RendezvousPointCountEnum.TWO + ) + rendezvous_point_loopback_id: int = Field(alias="rendezvousPointLoopbackId", description="Rendezvous point loopback Id", ge=0, le=1023, default=254) + + # System Settings + snmp_trap: bool = Field(alias="snmpTrap", description="Configure ND as a receiver for SNMP traps", default=True) + cdp: bool = Field(description="Enable CDP on management interface", default=False) + real_time_interface_statistics_collection: bool = Field( + alias="realTimeInterfaceStatisticsCollection", description="Enable Real Time Interface Statistics Collection. 
Valid for NX-OS only", default=False + ) + tcam_allocation: bool = Field( + alias="tcamAllocation", description="TCAM commands are automatically generated for VxLAN and vPC Fabric Peering when Enabled", default=True + ) + + # VPC Extended Configuration + vpc_peer_link_port_channel_id: str = Field( + alias="vpcPeerLinkPortChannelId", description="vPC Peer Link Port Channel ID (minimum: 1, maximum: 4096)", default="500" + ) + vpc_ipv6_neighbor_discovery_sync: bool = Field( + alias="vpcIpv6NeighborDiscoverySync", description="Enable IPv6 ND synchronization between vPC peers", default=True + ) + vpc_layer3_peer_router: bool = Field(alias="vpcLayer3PeerRouter", description="Enable Layer-3 Peer-Router on all Leaf switches", default=True) + vpc_tor_delay_restore_timer: int = Field(alias="vpcTorDelayRestoreTimer", description="vPC delay restore timer for ToR switches (in seconds)", default=30) + fabric_vpc_domain_id: bool = Field( + alias="fabricVpcDomainId", description="Enable the same vPC Domain Id for all vPC Pairs. 
Not Recommended.", default=False + ) + shared_vpc_domain_id: int = Field(alias="sharedVpcDomainId", description="vPC Domain Id to be used on all vPC pairs", default=1) + fabric_vpc_qos: bool = Field(alias="fabricVpcQos", description="Qos on spines for guaranteed delivery of vPC Fabric Peering communication", default=False) + fabric_vpc_qos_policy_name: str = Field( + alias="fabricVpcQosPolicyName", description="Qos Policy name should be same on all spines", default="spine_qos_for_fabric_vpc_peering" + ) + enable_peer_switch: bool = Field(alias="enablePeerSwitch", description="Enable the vPC peer-switch feature on ToR switches", default=False) + + # Bootstrap / Day-0 / DHCP + local_dhcp_server: bool = Field(alias="localDhcpServer", description="Automatic IP Assignment For POAP From Local DHCP Server", default=False) + dhcp_protocol_version: DhcpProtocolVersionEnum = Field( + alias="dhcpProtocolVersion", description="IP protocol version for Local DHCP Server", default=DhcpProtocolVersionEnum.DHCPV4 + ) + dhcp_start_address: str = Field(alias="dhcpStartAddress", description="DHCP Scope Start Address For Switch POAP", default="") + dhcp_end_address: str = Field(alias="dhcpEndAddress", description="DHCP Scope End Address For Switch POAP", default="") + management_gateway: str = Field(alias="managementGateway", description="Default Gateway For Management VRF On The Switch", default="") + management_ipv4_prefix: int = Field(alias="managementIpv4Prefix", description="Switch Mgmt IP Subnet Prefix if ipv4", default=24) + management_ipv6_prefix: int = Field(alias="managementIpv6Prefix", description="Switch Management IP Subnet Prefix if ipv6", default=64) + extra_config_nxos_bootstrap: str = Field( + alias="extraConfigNxosBootstrap", description="Additional CLIs required during device bootup/login e.g. 
AAA/Radius", default="" + ) + unnumbered_bootstrap_loopback_id: int = Field( + alias="unNumberedBootstrapLoopbackId", description="Bootstrap Seed Switch Loopback Interface ID", default=253 + ) + unnumbered_dhcp_start_address: str = Field( + alias="unNumberedDhcpStartAddress", + description="Switch Loopback DHCP Scope Start Address. Must be a subset of IGP/BGP Loopback Prefix Pool", + default="", + ) + unnumbered_dhcp_end_address: str = Field( + alias="unNumberedDhcpEndAddress", description="Switch Loopback DHCP Scope End Address. Must be a subset of IGP/BGP Loopback Prefix Pool", default="" + ) + inband_management: bool = Field(alias="inbandManagement", description="Manage switches with only Inband connectivity", default=False) + inband_dhcp_servers: List[str] = Field(alias="inbandDhcpServers", description="List of external DHCP server IP addresses (Max 3)", default_factory=list) + seed_switch_core_interfaces: List[str] = Field( + alias="seedSwitchCoreInterfaces", description="Seed switch fabric interfaces. Core-facing interface list on seed switch", default_factory=list + ) + spine_switch_core_interfaces: List[str] = Field( + alias="spineSwitchCoreInterfaces", description="Spine switch fabric interfaces. 
Core-facing interface list on all spines", default_factory=list + ) + + # Backup / Restore + real_time_backup: bool = Field(alias="realTimeBackup", description="Backup hourly only if there is any config deployment since last backup", default=False) + scheduled_backup: bool = Field(alias="scheduledBackup", description="Enable backup at the specified time daily", default=False) + scheduled_backup_time: str = Field( + alias="scheduledBackupTime", description="Time (UTC) in 24 hour format to take a daily backup if enabled (00:00 to 23:59)", default="" + ) + + # IPv6 / Dual-Stack + underlay_ipv6: bool = Field(alias="underlayIpv6", description="If not enabled, IPv4 underlay is used", default=False) + ipv6_multicast_group_subnet: str = Field( + alias="ipv6MulticastGroupSubnet", description="IPv6 Multicast address with prefix 112 to 128", default="ff1e::/121" + ) + tenant_routed_multicast_ipv6: bool = Field( + alias="tenantRoutedMulticastIpv6", description="For Overlay IPv6 Multicast Support In VXLAN Fabrics", default=False + ) + ipv6_link_local: bool = Field(alias="ipv6LinkLocal", description="If not enabled, Spine-Leaf interfaces will use global IPv6 addresses", default=True) + ipv6_subnet_target_mask: int = Field(alias="ipv6SubnetTargetMask", description="Mask for Underlay Subnet IPv6 Range", default=126) + ipv6_subnet_range: str = Field( + alias="ipv6SubnetRange", description="Underlay Subnet ipv6 range to assign Numbered and Peer Link SVI IPs", default="fd00::a04:0/112" + ) + bgp_loopback_ipv6_range: str = Field(alias="bgpLoopbackIpv6Range", description="Typically Loopback0 IPv6 Address Range", default="fd00::a02:0/119") + nve_loopback_ipv6_range: str = Field( + alias="nveLoopbackIpv6Range", description="Typically Loopback1 and Anycast Loopback IPv6 Address Range", default="fd00::a03:0/118" + ) + ipv6_anycast_rendezvous_point_ip_range: str = Field( + alias="ipv6AnycastRendezvousPointIpRange", description="Anycast RP IPv6 Address Range", default="fd00::254:254:0/118" + 
) + + # Multicast / Rendezvous Point Extended + mvpn_vrf_route_import_id: bool = Field( + alias="mvpnVrfRouteImportId", description="Enable MVPN VRI ID Generation For Tenant Routed Multicast With IPv4 Underlay", default=True + ) + mvpn_vrf_route_import_id_range: str = Field( + alias="mvpnVrfRouteImportIdRange", + description=( + "MVPN VRI ID (minimum: 1, maximum: 65535) for vPC, applicable when TRM enabled with IPv6 underlay, or " + "mvpnVrfRouteImportId enabled with IPv4 underlay" + ), + default="", + ) + vrf_route_import_id_reallocation: bool = Field( + alias="vrfRouteImportIdReallocation", description="One time VRI ID re-allocation based on 'MVPN VRI ID Range'", default=False + ) + l3vni_multicast_group: str = Field( + alias="l3vniMulticastGroup", description="Default Underlay Multicast group IPv4 address assigned for every overlay VRF", default="239.1.1.0" + ) + l3_vni_ipv6_multicast_group: str = Field( + alias="l3VniIpv6MulticastGroup", description="Default Underlay Multicast group IP6 address assigned for every overlay VRF", default="ff1e::" + ) + rendezvous_point_mode: RendezvousPointModeEnum = Field( + alias="rendezvousPointMode", description="Multicast rendezvous point Mode. 
For ipv6 underlay, please use asm only", default=RendezvousPointModeEnum.ASM + ) + phantom_rendezvous_point_loopback_id1: int = Field( + alias="phantomRendezvousPointLoopbackId1", description="Underlay phantom rendezvous point loopback primary Id for PIM Bi-dir deployments", default=2 + ) + phantom_rendezvous_point_loopback_id2: int = Field( + alias="phantomRendezvousPointLoopbackId2", description="Underlay phantom rendezvous point loopback secondary Id for PIM Bi-dir deployments", default=3 + ) + phantom_rendezvous_point_loopback_id3: int = Field( + alias="phantomRendezvousPointLoopbackId3", description="Underlay phantom rendezvous point loopback tertiary Id for PIM Bi-dir deployments", default=4 + ) + phantom_rendezvous_point_loopback_id4: int = Field( + alias="phantomRendezvousPointLoopbackId4", description="Underlay phantom rendezvous point loopback quaternary Id for PIM Bi-dir deployments", default=5 + ) + anycast_loopback_id: int = Field( + alias="anycastLoopbackId", description="Underlay Anycast Loopback Id. Used for vPC Peering in VXLANv6 Fabrics", default=10 + ) + + # VRF Lite / Sub-Interface + sub_interface_dot1q_range: str = Field( + alias="subInterfaceDot1qRange", description="Per aggregation dot1q range for VRF-Lite connectivity (minimum: 2, maximum: 4093)", default="2-511" + ) + vrf_lite_auto_config: VrfLiteAutoConfigEnum = Field( + alias="vrfLiteAutoConfig", + description=( + "VRF Lite Inter-Fabric Connection Deployment Options. If 'back2BackAndToExternal' is selected, VRF Lite " + "IFCs are auto created between border devices of two Easy Fabrics, and between border devices in Easy " + "Fabric and edge routers in External Fabric. The IP address is taken from the 'VRF Lite Subnet IP Range' " + "pool." 
+ ), + default=VrfLiteAutoConfigEnum.MANUAL, + ) + vrf_lite_subnet_range: str = Field(alias="vrfLiteSubnetRange", description="Address range to assign P2P Interfabric Connections", default="10.33.0.0/16") + vrf_lite_subnet_target_mask: int = Field(alias="vrfLiteSubnetTargetMask", description="VRF Lite Subnet Mask", default=30) + auto_unique_vrf_lite_ip_prefix: bool = Field( + alias="autoUniqueVrfLiteIpPrefix", + description=( + "When enabled, IP prefix allocated to the VRF LITE IFC is not reused on VRF extension over VRF LITE IFC. " + "Instead, unique IP Subnet is allocated for each VRF extension over VRF LITE IFC." + ), + default=False, + ) + auto_symmetric_vrf_lite: bool = Field( + alias="autoSymmetricVrfLite", + description=( + "Whether to auto generate VRF LITE sub-interface and BGP peering configuration on managed " + "neighbor devices. If set, auto created VRF Lite IFC links will have " + "'Auto Deploy for Peer' enabled." + ), + default=False, + ) + auto_vrf_lite_default_vrf: bool = Field( + alias="autoVrfLiteDefaultVrf", + description=( + "For ipv4 underlay, whether to auto generate BGP peering in Default VRF for VRF Lite IFC auto deployment " + "option. If set, will auto create VRF Lite Inter-Fabric links with 'Auto Deploy Default VRF' knob enabled" + ), + default=False, + ) + auto_symmetric_default_vrf: bool = Field( + alias="autoSymmetricDefaultVrf", + description=( + "Whether to auto generate Default VRF interface and BGP peering configuration on managed neighbor devices. " + "If set, auto created VRF Lite IFC links will have 'Auto Deploy Default VRF for Peer' enabled." 
+ ), + default=False, + ) + default_vrf_redistribution_bgp_route_map: str = Field( + alias="defaultVrfRedistributionBgpRouteMap", + description=("Route Map used to redistribute BGP routes to IGP in default vrf " "in auto created VRF Lite IFC links"), + default="extcon-rmap-filter", + ) + + # Per-VRF Loopback + per_vrf_loopback_auto_provision: bool = Field( + alias="perVrfLoopbackAutoProvision", + description=( + "Auto provision an IPv4 loopback on a VTEP on VRF attachment. Note: Enabling this option auto-provisions " + "loopback on existing VRF attachments and also when Edit, QuickAttach, or Multiattach actions are " + "performed. Provisioned loopbacks cannot be deleted until VRFs are unattached." + ), + default=False, + ) + per_vrf_loopback_ip_range: str = Field( + alias="perVrfLoopbackIpRange", description="Prefix pool to assign IPv4 addresses to loopbacks on VTEPs on a per VRF basis", default="10.5.0.0/22" + ) + per_vrf_loopback_auto_provision_ipv6: bool = Field( + alias="perVrfLoopbackAutoProvisionIpv6", description="Auto provision an IPv6 loopback on a VTEP on VRF attachment.", default=False + ) + per_vrf_loopback_ipv6_range: str = Field( + alias="perVrfLoopbackIpv6Range", description="Prefix pool to assign IPv6 addresses to loopbacks on VTEPs on a per VRF basis", default="fd00::a05:0/112" + ) + per_vrf_unique_loopback_auto_provision: bool = Field( + alias="perVrfUniqueLoopbackAutoProvision", + description=( + "Auto provision a unique IPV4 loopback on a VTEP on VRF attachment. Note: Enabling this option " + "auto-provisions unique loopback in the fabric per request. This option and per VRF per VTEP loopback " + "auto-provisioning are mutually exclusive. Provisioned unique loopbacks will be released upon VRF " + "unattachment or per request." 
+ ), + default=False, + ) + per_vrf_unique_loopback_ip_range: str = Field( + alias="perVrfUniqueLoopbackIpRange", + description="Prefix pool to assign unique IPv4 addresses to loopbacks on VTEPs on a per VRF basis", + default="10.6.0.0/22", + ) + per_vrf_unique_loopback_auto_provision_v6: bool = Field( + alias="perVrfUniqueLoopbackAutoProvisionV6", description="Auto provision a unique IPV6 loopback on a VTEP on VRF attachment.", default=False + ) + per_vrf_unique_loopback_ipv6_range: str = Field( + alias="perVrfUniqueLoopbackIpv6Range", + description="Prefix pool to assign unique IPv6 addresses to loopbacks on VTEPs on a per VRF basis", + default="fd00::a06:0/112", + ) + + # Authentication — BGP Extended + bgp_authentication_key: str = Field(alias="bgpAuthenticationKey", description="Encrypted BGP authentication key based on type", default="") + + # Authentication — PIM + pim_hello_authentication: bool = Field(alias="pimHelloAuthentication", description="Valid for IPv4 Underlay only", default=False) + pim_hello_authentication_key: str = Field(alias="pimHelloAuthenticationKey", description="3DES Encrypted", default="") + + # Authentication — BFD + bfd_authentication: bool = Field(alias="bfdAuthentication", description="Enable BFD Authentication. 
Valid for P2P Interfaces only", default=False) + bfd_authentication_key_id: int = Field(alias="bfdAuthenticationKeyId", description="BFD Authentication Key ID", default=100) + bfd_authentication_key: str = Field(alias="bfdAuthenticationKey", description="Encrypted SHA1 secret value", default="") + bfd_ospf: bool = Field(alias="bfdOspf", description="Enable BFD For OSPF", default=False) + bfd_isis: bool = Field(alias="bfdIsis", description="Enable BFD For ISIS", default=False) + bfd_pim: bool = Field(alias="bfdPim", description="Enable BFD For PIM", default=False) + + # Authentication — OSPF + ospf_authentication: bool = Field(alias="ospfAuthentication", description="Enable OSPF Authentication", default=False) + ospf_authentication_key_id: int = Field(alias="ospfAuthenticationKeyId", description="(Min:0, Max:255)", default=127) + ospf_authentication_key: str = Field(alias="ospfAuthenticationKey", description="OSPF Authentication Key. 3DES Encrypted", default="") + + # IS-IS + isis_level: IsisLevelEnum = Field(alias="isisLevel", description="IS-IS Level", default=IsisLevelEnum.LEVEL_2) + isis_area_number: str = Field( + alias="isisAreaNumber", + description=( + "NET in form of XX.<4-hex-digit Custom Area Number>.XXXX.XXXX.XXXX.00, default Area Number " + "is 0001. If area number in existing NETs matches the previous area number set in fabric " + "settings and is different from the " + "current area number, these NETs will be updated by Recalculate and Deploy." 
+ ), + default="0001", + ) + isis_point_to_point: bool = Field( + alias="isisPointToPoint", description="This will enable network point-to-point on fabric interfaces which are numbered", default=True + ) + isis_authentication: bool = Field(alias="isisAuthentication", description="Enable IS-IS Authentication", default=False) + isis_authentication_keychain_name: str = Field(alias="isisAuthenticationKeychainName", description="IS-IS Authentication Keychain Name", default="") + isis_authentication_keychain_key_id: int = Field(alias="isisAuthenticationKeychainKeyId", description="IS-IS Authentication Key ID", default=127) + isis_authentication_key: str = Field(alias="isisAuthenticationKey", description="IS-IS Authentication Key. Cisco Type 7 Encrypted", default="") + isis_overload: bool = Field( + alias="isisOverload", description="Set IS-IS Overload Bit. When enabled, set the overload bit for an elapsed time after a reload", default=True + ) + isis_overload_elapse_time: int = Field( + alias="isisOverloadElapseTime", description="IS-IS Overload Bit Elapsed Time. Clear the overload bit after an elapsed time in seconds", default=60 + ) + + # MACsec + macsec: bool = Field( + description=( + "Enable MACsec in the fabric. MACsec fabric parameters are used for configuring MACsec on a fabric link if " "MACsec is enabled on the link." + ), + default=False, + ) + macsec_cipher_suite: MacsecCipherSuiteEnum = Field( + alias="macsecCipherSuite", description="Configure Cipher Suite", default=MacsecCipherSuiteEnum.GCM_AES_XPN_256 + ) + macsec_key_string: str = Field(alias="macsecKeyString", description="MACsec Primary Key String. Cisco Type 7 Encrypted Octet String", default="") + macsec_algorithm: MacsecAlgorithmEnum = Field( + alias="macsecAlgorithm", description="MACsec Primary Cryptographic Algorithm. 
AES_128_CMAC or AES_256_CMAC", default=MacsecAlgorithmEnum.AES_128_CMAC + ) + macsec_fallback_key_string: str = Field( + alias="macsecFallbackKeyString", description="MACsec Fallback Key String. Cisco Type 7 Encrypted Octet String", default="" + ) + macsec_fallback_algorithm: MacsecAlgorithmEnum = Field( + alias="macsecFallbackAlgorithm", + description="MACsec Fallback Cryptographic Algorithm. AES_128_CMAC or AES_256_CMAC", + default=MacsecAlgorithmEnum.AES_128_CMAC, + ) + macsec_report_timer: int = Field(alias="macsecReportTimer", description="MACsec Operational Status periodic report timer in minutes", default=5) + + # VRF Lite MACsec + vrf_lite_macsec: bool = Field( + alias="vrfLiteMacsec", + description=( + "Enable MACsec on DCI links. DCI MACsec fabric parameters are used for configuring MACsec on a DCI link if " + "'Use Link MACsec Setting' is disabled on the link." + ), + default=False, + ) + vrf_lite_macsec_cipher_suite: MacsecCipherSuiteEnum = Field( + alias="vrfLiteMacsecCipherSuite", description="DCI MACsec Cipher Suite", default=MacsecCipherSuiteEnum.GCM_AES_XPN_256 + ) + vrf_lite_macsec_key_string: str = Field( + alias="vrfLiteMacsecKeyString", description="DCI MACsec Primary Key String. Cisco Type 7 Encrypted Octet String", default="" + ) + vrf_lite_macsec_algorithm: MacsecAlgorithmEnum = Field( + alias="vrfLiteMacsecAlgorithm", description="DCI MACsec Primary Cryptographic Algorithm", default=MacsecAlgorithmEnum.AES_128_CMAC + ) + vrf_lite_macsec_fallback_key_string: str = Field( + alias="vrfLiteMacsecFallbackKeyString", + description=("DCI MACsec Fallback Key String. Cisco Type 7 Encrypted Octet String. " "This parameter is used when DCI link has QKD disabled."), + default="", + ) + vrf_lite_macsec_fallback_algorithm: MacsecAlgorithmEnum = Field( + alias="vrfLiteMacsecFallbackAlgorithm", + description="AES_128_CMAC or AES_256_CMAC. 
This parameter is used when DCI link has QKD disabled.", + default=MacsecAlgorithmEnum.AES_128_CMAC, + ) + + # Quantum Key Distribution / Trustpoint + quantum_key_distribution: bool = Field( + alias="quantumKeyDistribution", + description=("Enable Data Center Interconnect Media Access Control Security " "with Quantum Key Distribution config"), + default=False, + ) + quantum_key_distribution_profile_name: str = Field( + alias="quantumKeyDistributionProfileName", description="Name of crypto profile (Max Size 63)", default="" + ) + key_management_entity_server_ip: str = Field(alias="keyManagementEntityServerIp", description="Key Management Entity server ipv4 address", default="") + key_management_entity_server_port: int = Field(alias="keyManagementEntityServerPort", description="Key Management Entity server port number", default=0) + trustpoint_label: str = Field(alias="trustpointLabel", description="Tls authentication type trustpoint label", default="") + skip_certificate_verification: bool = Field(alias="skipCertificateVerification", description="Skip verification of incoming certificate", default=False) + + # BGP / Routing Enhancements + auto_bgp_neighbor_description: bool = Field(alias="autoBgpNeighborDescription", description="Generate BGP EVPN Neighbor Description", default=True) + ibgp_peer_template: str = Field( + alias="ibgpPeerTemplate", + description=( + "Specifies the iBGP Peer-Template config used for Route Reflectors and spines with border " + "or border gateway role. This field should begin with ' template peer' or " + "' template peer-session'. This must have 2 " + "leading spaces. Note ! All configs should strictly match show run output, with respect to case and " + "newlines. Any mismatches will yield unexpected diffs during deploy." + ), + default="", + ) + leaf_ibgp_peer_template: str = Field( + alias="leafIbgpPeerTemplate", + description=( + "Specifies the config used for leaf, border or border gateway. 
If this field is empty, the peer template " + "defined in iBGP Peer-Template Config is used on all BGP enabled devices (RRs, leafs, border or border " + "gateway roles). This field should begin with ' template peer' or ' template peer-session'. This must " + "have 2 leading spaces. Note ! All configs should strictly match 'show run' output, with respect to case " + "and newlines. Any mismatches will yield unexpected diffs during deploy." + ), + default="", + ) + link_state_routing_tag: str = Field(alias="linkStateRoutingTag", description="Underlay routing protocol process tag", default="UNDERLAY") + static_underlay_ip_allocation: bool = Field( + alias="staticUnderlayIpAllocation", description="Checking this will disable Dynamic Underlay IP Address Allocations", default=False + ) + router_id_range: str = Field(alias="routerIdRange", description="BGP Router ID Range in IPv4 subnet format used for IPv6 Underlay.", default="10.2.0.0/23") + + # Security Group Tags (SGT) + security_group_tag: bool = Field(alias="securityGroupTag", description="Security group can be enabled only with cli overlay mode", default=False) + security_group_tag_prefix: str = Field(alias="securityGroupTagPrefix", description="Prefix to be used when a new security group is created", default="SG_") + security_group_tag_mac_segmentation: bool = Field( + alias="securityGroupTagMacSegmentation", description="Enable MAC based segmentation for security groups", default=False + ) + security_group_tag_id_range: str = Field( + alias="securityGroupTagIdRange", description="Security group tag (SGT) identifier range (minimum: 16, maximum: 65535)", default="10000-14000" + ) + security_group_tag_preprovision: bool = Field( + alias="securityGroupTagPreprovision", description="Generate security groups configuration for non-enforced VRFs", default=False + ) + security_group_status: SecurityGroupStatusEnum = Field( + alias="securityGroupStatus", description="Security group status", 
default=SecurityGroupStatusEnum.DISABLED + ) + + # Queuing / QoS + default_queuing_policy: bool = Field(alias="defaultQueuingPolicy", description="Enable Default Queuing Policies", default=False) + default_queuing_policy_cloudscale: str = Field( + alias="defaultQueuingPolicyCloudscale", + description="Queuing Policy for all 92xx, -EX, -FX, -FX2, -FX3, -GX series switches in the fabric", + default="queuing_policy_default_8q_cloudscale", + ) + default_queuing_policy_r_series: str = Field( + alias="defaultQueuingPolicyRSeries", description="Queueing policy for all Nexus R-series switches", default="queuing_policy_default_r_series" + ) + default_queuing_policy_other: str = Field( + alias="defaultQueuingPolicyOther", description="Queuing Policy for all other switches in the fabric", default="queuing_policy_default_other" + ) + aiml_qos: bool = Field( + alias="aimlQos", + description=("Configures QoS and Queuing Policies specific to N9K Cloud Scale (CS) & Silicon One (S1) switch fabric for " "AI network workloads"), + default=False, + ) + aiml_qos_policy: AimlQosPolicyEnum = Field( + alias="aimlQosPolicy", + description=("Queuing Policy based on predominant fabric link speed: 800G / 400G / 100G / 25G. 
User-defined allows for " "custom configuration."), + default=AimlQosPolicyEnum.V_400G, + ) + roce_v2: str = Field( + alias="roceV2", + description=( + "DSCP for RDMA traffic: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43,cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="26", + ) + cnp: str = Field( + description=( + "DSCP value for Congestion Notification: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43,cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="48", + ) + wred_min: int = Field(alias="wredMin", description="WRED minimum threshold (in kbytes)", default=950) + wred_max: int = Field(alias="wredMax", description="WRED maximum threshold (in kbytes)", default=3000) + wred_drop_probability: int = Field(alias="wredDropProbability", description="Drop probability %", default=7) + wred_weight: int = Field(alias="wredWeight", description="Influences how quickly WRED reacts to queue depth changes", default=0) + bandwidth_remaining: int = Field(alias="bandwidthRemaining", description="Percentage of remaining bandwidth allocated to AI traffic queues", default=50) + dlb: bool = Field( + description=( + "Enables fabric-level Dynamic Load Balancing (DLB) configuration. Note: Inter-Switch-Links (ISL) will be " "configured as DLB Interfaces" + ), + default=False, + ) + dlb_mode: DlbModeEnum = Field( + alias="dlbMode", + description=( + "Select system-wide flowlet, per-packet (packet spraying) or policy driven mixed mode. Note: Mixed mode is " + "supported on Silicon One (S1) platform only." 
+ ), + default=DlbModeEnum.FLOWLET, + ) + dlb_mixed_mode_default: DlbMixedModeDefaultEnum = Field( + alias="dlbMixedModeDefault", description="Default load balancing mode for policy driven mixed mode DLB", default=DlbMixedModeDefaultEnum.ECMP + ) + flowlet_aging: int = Field( + alias="flowletAging", + description=( + "Flowlet aging timer in microseconds. Valid range depends on platform: Cloud Scale (CS)=1-2000000 (default " + "500), Silicon One (S1)=1-1024 (default 256)" + ), + default=1, + ) + flowlet_dscp: str = Field( + alias="flowletDscp", + description=( + "DSCP values for flowlet load balancing: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43,cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="", + ) + per_packet_dscp: str = Field( + alias="perPacketDscp", + description=( + "DSCP values for per-packet load balancing: numeric (0-63) with ranges/comma, named values " + "(af11,af12,af13,af21,af22,af23,af31,af32,af33,af41,af42,af43,cs1,cs2,cs3,cs4,cs5,cs6,cs7,default,ef)" + ), + default="", + ) + ai_load_sharing: bool = Field( + alias="aiLoadSharing", description="Enable IP load sharing using source and destination address for AI workloads", default=False + ) + priority_flow_control_watch_interval: int = Field( + alias="priorityFlowControlWatchInterval", + description="Acceptable values from 101 to 1000 (milliseconds). Leave blank for system default (100ms).", + default=101, + ) + + # PTP + ptp: bool = Field(description="Enable Precision Time Protocol (PTP)", default=False) + ptp_loopback_id: int = Field(alias="ptpLoopbackId", description="Precision Time Protocol Source Loopback Id", default=0) + ptp_domain_id: int = Field(alias="ptpDomainId", description="Multiple Independent PTP Clocking Subdomains on a Single Network", default=0) + ptp_vlan_id: int = Field(alias="ptpVlanId", description="Precision Time Protocol (PTP) Source VLAN ID. 
SVI used for ptp source on ToRs", default=2) + + # STP + stp_root_option: StpRootOptionEnum = Field( + alias="stpRootOption", + description=( + "Which protocol to use for configuring root bridge? rpvst+: Rapid Per-VLAN Spanning Tree, mst: Multiple " + "Spanning Tree, unmanaged (default): STP Root not managed by ND" + ), + default=StpRootOptionEnum.UNMANAGED, + ) + stp_vlan_range: str = Field(alias="stpVlanRange", description="Spanning tree Vlan range (minimum: 1, maximum: 4094)", default="1-3967") + mst_instance_range: str = Field(alias="mstInstanceRange", description="Minimum Spanning Tree instance range (minimum: 0, maximum: 4094)", default="0") + stp_bridge_priority: int = Field(alias="stpBridgePriority", description="Bridge priority for the spanning tree in increments of 4096", default=0) + + # MPLS Handoff + mpls_handoff: bool = Field(alias="mplsHandoff", description="Enable MPLS Handoff", default=False) + mpls_loopback_identifier: int = Field(alias="mplsLoopbackIdentifier", description="Used for VXLAN to MPLS SR/LDP Handoff", default=101) + mpls_isis_area_number: str = Field( + alias="mplsIsisAreaNumber", + description=( + "NET in form of XX.<4-hex-digit Custom Area Number>.XXXX.XXXX.XXXX.00, default Area Number is 0001, used " + "only if routing protocol on DCI MPLS link is is-is" + ), + default="0001", + ) + mpls_loopback_ip_range: str = Field(alias="mplsLoopbackIpRange", description="Used for VXLAN to MPLS SR/LDP Handoff", default="10.101.0.0/25") + + # Private VLAN + private_vlan: bool = Field(alias="privateVlan", description="Enable PVLAN on switches except spines and super spines", default=False) + default_private_vlan_secondary_network_template: str = Field( + alias="defaultPrivateVlanSecondaryNetworkTemplate", description="Default PVLAN secondary network template", default="Pvlan_Secondary_Network" + ) + allow_vlan_on_leaf_tor_pairing: AllowVlanOnLeafTorPairingEnum = Field( + alias="allowVlanOnLeafTorPairing", + description="Set trunk allowed vlan to 
'none' or 'all' for leaf-tor pairing port-channels", + default=AllowVlanOnLeafTorPairingEnum.NONE, + ) + + # Leaf / TOR + leaf_tor_id_range: bool = Field(alias="leafTorIdRange", description="Use specific vPC/Port-channel ID range for leaf-tor pairings", default=False) + leaf_tor_vpc_port_channel_id_range: str = Field( + alias="leafTorVpcPortChannelIdRange", + description=( + "Specify vPC/Port-channel ID range (minimum: 1, maximum: 4096), this range is used for auto-allocating " + "vPC/Port-Channel IDs for leaf-tor pairings" + ), + default="1-499", + ) + + # Resource ID Ranges + l3_vni_no_vlan_default_option: bool = Field( + alias="l3VniNoVlanDefaultOption", + description=( + "L3 VNI configuration without VLAN configuration. This value is propagated on vrf creation as the default " + "value of 'Enable L3VNI w/o VLAN' in vrf" + ), + default=False, + ) + ip_service_level_agreement_id_range: str = Field( + alias="ipServiceLevelAgreementIdRange", + description=("Service Level Agreement (SLA) ID Range " "(minimum: 1, maximum: 655214748364735). Per switch SLA ID Range"), + default="10000-19999", + ) + object_tracking_number_range: str = Field( + alias="objectTrackingNumberRange", + description="Tracked Object ID Range (minimum: 1, maximum: 512) Per switch tracked object ID Range", + default="100-299", + ) + service_network_vlan_range: str = Field( + alias="serviceNetworkVlanRange", + description=("Service Network VLAN Range (minimum: 2, maximum: 4094). 
" "Per Switch Overlay Service Network VLAN Range"), + default="3000-3199", + ) + route_map_sequence_number_range: str = Field( + alias="routeMapSequenceNumberRange", description="Route Map Sequence Number Range (minimum: 1, maximum: 65534)", default="1-65534" + ) + + # DNS / NTP / Syslog Collections + ntp_server_collection: List[str] = Field(default_factory=lambda: ["string"], alias="ntpServerCollection") + ntp_server_vrf_collection: List[str] = Field(default_factory=lambda: ["string"], alias="ntpServerVrfCollection") + dns_collection: List[str] = Field(default_factory=lambda: ["5.192.28.174"], alias="dnsCollection") + dns_vrf_collection: List[str] = Field(default_factory=lambda: ["string"], alias="dnsVrfCollection") + syslog_server_collection: List[str] = Field(default_factory=lambda: ["string"], alias="syslogServerCollection") + syslog_server_vrf_collection: List[str] = Field(default_factory=lambda: ["string"], alias="syslogServerVrfCollection") + syslog_severity_collection: List[int] = Field( + default_factory=lambda: [7], alias="syslogSeverityCollection", description="List of Syslog severity values, one per Syslog server" + ) + + # Extra Config / Pre-Interface Config / AAA / Banner + banner: str = Field( + description=("Message of the Day (motd) banner. 
Delimiter char (very first char is delimiter char) followed by message " "ending with delimiter"), + default="", + ) + extra_config_leaf: str = Field( + alias="extraConfigLeaf", + description=( + "Additional CLIs as captured from the show running configuration, added after interface configurations for " + "all switches with a VTEP unless they have some spine role" + ), + default="", + ) + extra_config_spine: str = Field( + alias="extraConfigSpine", + description=( + "Additional CLIs as captured from the show running configuration, added after interface configurations for " "all switches with some spine role" + ), + default="", + ) + extra_config_tor: str = Field( + alias="extraConfigTor", + description=("Additional CLIs as captured from the show running configuration, added after interface configurations for " "all ToRs"), + default="", + ) + extra_config_intra_fabric_links: str = Field(alias="extraConfigIntraFabricLinks", description="Additional CLIs for all Intra-Fabric links", default="") + extra_config_aaa: str = Field(alias="extraConfigAaa", description="AAA Configurations", default="") + aaa: bool = Field(description="Include AAA configs from Manageability tab during device bootup", default=False) + pre_interface_config_leaf: str = Field( + alias="preInterfaceConfigLeaf", + description=( + "Additional CLIs as captured from the show running configuration, added before interface " + "configurations for all switches with a VTEP unless they have some spine role" + ), + default="", + ) + pre_interface_config_spine: str = Field( + alias="preInterfaceConfigSpine", + description=( + "Additional CLIs as captured from the show running configuration, added before interface " "configurations for all switches with some spine role" + ), + default="", + ) + pre_interface_config_tor: str = Field( + alias="preInterfaceConfigTor", + description=("Additional CLIs as captured from the show running configuration, added before interface " "configurations for all ToRs"), + 
default="", + ) + + # System / Compliance / OAM / Misc + anycast_border_gateway_advertise_physical_ip: bool = Field( + alias="anycastBorderGatewayAdvertisePhysicalIp", + description="To advertise Anycast Border Gateway PIP as VTEP. Effective on MSD fabric 'Recalculate Config'", + default=False, + ) + greenfield_debug_flag: GreenfieldDebugFlagEnum = Field( + alias="greenfieldDebugFlag", + description="Allow switch configuration to be cleared without a reload when preserveConfig is set to false", + default=GreenfieldDebugFlagEnum.DISABLE, + ) + interface_statistics_load_interval: int = Field( + alias="interfaceStatisticsLoadInterval", description="Interface Statistics Load Interval. Time in seconds", default=10 + ) + nve_hold_down_timer: int = Field(alias="nveHoldDownTimer", description="NVE Source Inteface HoldDown Time in seconds", default=180) + next_generation_oam: bool = Field( + alias="nextGenerationOAM", + description=("Enable the Next Generation (NG) OAM feature for all switches in the fabric to aid in trouble-shooting " "VXLAN EVPN fabrics"), + default=True, + ) + ngoam_south_bound_loop_detect: bool = Field( + alias="ngoamSouthBoundLoopDetect", description="Enable the Next Generation (NG) OAM southbound loop detection", default=False + ) + ngoam_south_bound_loop_detect_probe_interval: int = Field( + alias="ngoamSouthBoundLoopDetectProbeInterval", + description="Set Next Generation (NG) OAM southbound loop detection probe interval in seconds.", + default=300, + ) + ngoam_south_bound_loop_detect_recovery_interval: int = Field( + alias="ngoamSouthBoundLoopDetectRecoveryInterval", + description="Set the Next Generation (NG) OAM southbound loop detection recovery interval in seconds", + default=600, + ) + strict_config_compliance_mode: bool = Field( + alias="strictConfigComplianceMode", + description=("Enable bi-directional compliance checks to flag additional configs in the running config that are not in " "the intent/expected config"), + default=False, + ) + 
advanced_ssh_option: bool = Field( + alias="advancedSshOption", description="Enable AAA IP Authorization. Enable only, when IP Authorization is enabled in the AAA Server", default=False + ) + copp_policy: CoppPolicyEnum = Field( + alias="coppPolicy", + description="Fabric wide CoPP policy. Customized CoPP policy should be provided when 'manual' is selected.", + default=CoppPolicyEnum.STRICT, + ) + power_redundancy_mode: PowerRedundancyModeEnum = Field( + alias="powerRedundancyMode", description="Default Power Supply Mode for NX-OS Switches", default=PowerRedundancyModeEnum.REDUNDANT + ) + host_interface_admin_state: bool = Field(alias="hostInterfaceAdminState", description="Unshut Host Interfaces by Default", default=True) + heartbeat_interval: int = Field(alias="heartbeatInterval", description="XConnect heartbeat interval for periodic link status checks", default=190) + policy_based_routing: bool = Field( + alias="policyBasedRouting", + description="Enable feature pbr, sla sender, epbr, or enable feature pbr, based on the L4-L7 Services use case", + default=False, + ) + brownfield_network_name_format: str = Field( + alias="brownfieldNetworkNameFormat", + description="Generated network name should be less than 64 characters", + default="Auto_Net_VNI$$VNI$$_VLAN$$VLAN_ID$$", + ) + brownfield_skip_overlay_network_attachments: bool = Field( + alias="brownfieldSkipOverlayNetworkAttachments", + description="Skip Overlay Network Interface Attachments for Brownfield and Host Port Resync cases", + default=False, + ) + allow_smart_switch_onboarding: bool = Field( + alias="allowSmartSwitchOnboarding", description="Enable onboarding of smart switches to Hypershield for firewall service", default=False + ) + + # Hypershield / Connectivity + connectivity_domain_name: Optional[str] = Field(alias="connectivityDomainName", description="Domain name to connect to Hypershield", default=None) + hypershield_connectivity_proxy_server: Optional[str] = Field( + 
alias="hypershieldConnectivityProxyServer", + description="IPv4 address, IPv6 address, or DNS name of the proxy server for Hypershield communication", + default=None, + ) + hypershield_connectivity_proxy_server_port: Optional[int] = Field( + alias="hypershieldConnectivityProxyServerPort", description="Proxy port number for communication with Hypershield", default=None + ) + hypershield_connectivity_source_intf: Optional[str] = Field( + alias="hypershieldConnectivitySourceIntf", description="Loopback interface on smart switch for communication with Hypershield", default=None + ) + + @field_validator("bgp_asn") + @classmethod + def validate_bgp_asn(cls, value: str) -> str: + """ + # Summary + + Validate BGP ASN format and range. + + ## Description + + Accepts either a plain integer ASN (1-4294967295) or dotted four-byte + ASN notation in the form ``MMMM.NNNN`` where both parts are in the + range 1-65535 / 0-65535 respectively. + + ## Raises + + - `ValueError` - If the value does not match the expected ASN format + """ + if not BGP_ASN_RE.match(value): + raise ValueError(f"Invalid BGP ASN '{value}'. " "Expected a plain integer (1-4294967295) or dotted notation (1-65535.0-65535).") + return value + + @field_validator("site_id") + @classmethod + def validate_site_id(cls, value: str) -> str: + """ + # Summary + + Validate site ID format. 
+ + ## Raises + + - `ValueError` - If site ID is not numeric or outside valid range + """ + + # If value is empty string (default), skip validation (will be set to BGP ASN later if still empty) + if value == "": + return value + + if not value.isdigit(): + raise ValueError(f"Site ID must be numeric, got: {value}") + + site_id_int = int(value) + if not (1 <= site_id_int <= 281474976710655): + raise ValueError(f"Site ID must be between 1 and 281474976710655, got: {site_id_int}") + + return value + + @field_validator("anycast_gateway_mac") + @classmethod + def validate_mac_address(cls, value: str) -> str: + """ + # Summary + + Validate MAC address format. + + ## Raises + + - `ValueError` - If MAC address format is invalid + """ + mac_pattern = re.compile(r"^([0-9a-fA-F]{4}\.){2}[0-9a-fA-F]{4}$") + if not mac_pattern.match(value): + raise ValueError(f"Invalid MAC address format, expected xxxx.xxxx.xxxx, got: {value}") + + return value.lower() + + +class FabricIbgpModel(NDBaseModel): + """ + # Summary + + Complete model for creating a new iBGP VXLAN fabric. + + This model combines all necessary components for fabric creation including + basic fabric properties, management settings, telemetry, and streaming configuration. 
+ + ## Raises + + - `ValueError` - If required fields are missing or invalid + - `TypeError` - If field types don't match expected types + """ + + model_config = ConfigDict( + str_strip_whitespace=True, validate_assignment=True, populate_by_name=True, extra="allow" # Allow extra fields from API responses + ) + + identifiers: ClassVar[Optional[List[str]]] = ["fabric_name"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + + # Basic Fabric Properties + category: Literal["fabric"] = Field(description="Resource category", default="fabric") + fabric_name: str = Field(alias="name", description="Fabric name", min_length=1, max_length=64) + location: Optional[LocationModel] = Field(description="Geographic location of the fabric", default=None) + + # License and Operations + license_tier: LicenseTierEnum = Field(alias="licenseTier", description="License tier", default=LicenseTierEnum.PREMIER) + alert_suspend: AlertSuspendEnum = Field(alias="alertSuspend", description="Alert suspension state", default=AlertSuspendEnum.DISABLED) + telemetry_collection: bool = Field(alias="telemetryCollection", description="Enable telemetry collection", default=False) + telemetry_collection_type: str = Field(alias="telemetryCollectionType", description="Telemetry collection type", default="outOfBand") + telemetry_streaming_protocol: str = Field(alias="telemetryStreamingProtocol", description="Telemetry streaming protocol", default="ipv4") + telemetry_source_interface: str = Field(alias="telemetrySourceInterface", description="Telemetry source interface", default="") + telemetry_source_vrf: str = Field(alias="telemetrySourceVrf", description="Telemetry source VRF", default="") + security_domain: str = Field(alias="securityDomain", description="Security domain", default="all") + + # Core Management Configuration + management: Optional[VxlanIbgpManagementModel] = Field(description="iBGP VXLAN management configuration", 
default=None) + + # Optional Advanced Settings + telemetry_settings: Optional[TelemetrySettingsModel] = Field(alias="telemetrySettings", description="Telemetry configuration", default=None) + external_streaming_settings: ExternalStreamingSettingsModel = Field( + alias="externalStreamingSettings", description="External streaming settings", default_factory=ExternalStreamingSettingsModel + ) + + @field_validator("fabric_name") + @classmethod + def validate_fabric_name(cls, value: str) -> str: + """ + # Summary + + Validate fabric name format and characters. + + ## Raises + + - `ValueError` - If name contains invalid characters or format + """ + if not re.match(r"^[a-zA-Z0-9_-]+$", value): + raise ValueError(f"Fabric name can only contain letters, numbers, underscores, and hyphens, got: {value}") + + return value + + @model_validator(mode="after") + def validate_fabric_consistency(self) -> "FabricModel": + """ + # Summary + + Validate consistency between fabric settings and management configuration. + + ## Raises + + - `ValueError` - If fabric settings are inconsistent + """ + # Ensure management type matches model type + if self.management is not None and self.management.type != FabricTypeEnum.VXLAN_IBGP: + raise ValueError(f"Management type must be {FabricTypeEnum.VXLAN_IBGP}") + + # Propagate fabric name to management model + if self.management is not None: + self.management.name = self.fabric_name + + # Propagate BGP ASN to Site ID management model if not set + if self.management is not None and self.management.site_id == "": + bgp_asn = self.management.bgp_asn + if "." 
in bgp_asn: + # asdot notation (High.Low) → convert to asplain decimal: (High × 65536) + Low + high, low = bgp_asn.split(".") + self.management.site_id = str(int(high) * 65536 + int(low)) + else: + # Already plain decimal + self.management.site_id = bgp_asn + + # Validate telemetry consistency + if self.telemetry_collection and self.telemetry_settings is None: + # Auto-create default telemetry settings if collection is enabled + self.telemetry_settings = TelemetrySettingsModel() + + return self + + # TODO: to generate from Fields (low priority) + @classmethod + def get_argument_spec(cls) -> Dict: + return dict( + state={ + "type": "str", + "default": "merged", + "choices": ["merged", "replaced", "deleted", "overridden"], + }, + config={"required": False, "type": "list", "elements": "dict"}, + ) + + +# Export all models for external use +__all__ = [ + "LocationModel", + "NetflowExporterModel", + "NetflowRecordModel", + "NetflowMonitorModel", + "NetflowSettingsModel", + "BootstrapSubnetModel", + "TelemetryFlowCollectionModel", + "TelemetryMicroburstModel", + "TelemetryAnalysisSettingsModel", + "TelemetryEnergyManagementModel", + "TelemetrySettingsModel", + "ExternalStreamingSettingsModel", + "VxlanIbgpManagementModel", + "FabricIbgpModel", + "FabricTypeEnum", + "AlertSuspendEnum", + "LicenseTierEnum", + "ReplicationModeEnum", + "OverlayModeEnum", + "LinkStateRoutingProtocolEnum", +] diff --git a/plugins/module_utils/nd.py b/plugins/module_utils/nd.py index 50a5eeb2..f8f14e5d 100644 --- a/plugins/module_utils/nd.py +++ b/plugins/module_utils/nd.py @@ -75,7 +75,18 @@ def issubset(subset, superset): if not isinstance(subset, dict): if isinstance(subset, list): - return all(item in superset for item in subset) + if len(subset) != len(superset): + return False + + remaining = list(superset) + for item in subset: + for index, candidate in enumerate(remaining): + if issubset(item, candidate) and issubset(candidate, item): + del remaining[index] + break + else: + return 
False + return True return subset == superset for key, value in subset.items(): diff --git a/plugins/module_utils/nd_config_collection.py b/plugins/module_utils/nd_config_collection.py index 832cc132..4e3541cd 100644 --- a/plugins/module_utils/nd_config_collection.py +++ b/plugins/module_utils/nd_config_collection.py @@ -119,9 +119,15 @@ def delete(self, key: IdentifierKey) -> bool: # Diff Operations - def get_diff_config(self, new_item: NDBaseModel) -> Literal["new", "no_diff", "changed"]: + def get_diff_config(self, new_item: NDBaseModel, exclude_unset: bool = False) -> Literal["new", "no_diff", "changed"]: """ Compare single item against collection. + + Args: + new_item: The proposed configuration item. + exclude_unset: When True, only compare fields explicitly set in + ``new_item``. Useful for merge operations where unspecified + fields should not trigger a diff. """ try: key = self._extract_key(new_item) @@ -133,7 +139,7 @@ def get_diff_config(self, new_item: NDBaseModel) -> Literal["new", "no_diff", "c if existing is None: return "new" - is_subset = existing.get_diff(new_item) + is_subset = existing.get_diff(new_item, exclude_unset=exclude_unset) return "no_diff" if is_subset else "changed" diff --git a/plugins/module_utils/nd_state_machine.py b/plugins/module_utils/nd_state_machine.py index fb812c33..109c7ca3 100644 --- a/plugins/module_utils/nd_state_machine.py +++ b/plugins/module_utils/nd_state_machine.py @@ -77,7 +77,11 @@ def _manage_create_update_state(self) -> None: identifier = proposed_item.get_identifier_value() try: # Determine diff status - diff_status = self.existing.get_diff_config(proposed_item) + # For merged state, only compare fields explicitly provided by + # the user so that Pydantic default values do not trigger false + # diffs or overwrite existing configuration. 
+ exclude_unset = self.state == "merged" + diff_status = self.existing.get_diff_config(proposed_item, exclude_unset=exclude_unset) # No changes needed if diff_status == "no_diff": diff --git a/plugins/module_utils/orchestrators/manage_fabric_ebgp.py b/plugins/module_utils/orchestrators/manage_fabric_ebgp.py new file mode 100644 index 00000000..2171189a --- /dev/null +++ b/plugins/module_utils/orchestrators/manage_fabric_ebgp.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Type +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_ebgp import FabricEbgpModel +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricsGet, + EpManageFabricsListGet, + EpManageFabricsPost, + EpManageFabricsPut, + EpManageFabricsDelete, +) + + +class ManageEbgpFabricOrchestrator(NDBaseOrchestrator): + model_class: Type[NDBaseModel] = FabricEbgpModel + + create_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsPost + update_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsPut + delete_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsDelete + query_one_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsGet + query_all_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsListGet + + def query_all(self) -> ResponseType: + """ + Custom query_all action to extract 
'fabrics' from response, + filtered to only vxlanEbgp fabric types. + """ + try: + api_endpoint = self.query_all_endpoint() + result = self.sender.query_obj(api_endpoint.path) + fabrics = result.get("fabrics", []) or [] + return [f for f in fabrics if f.get("management", {}).get("type") == "vxlanEbgp"] + except Exception as e: + raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/orchestrators/manage_fabric_external.py b/plugins/module_utils/orchestrators/manage_fabric_external.py new file mode 100644 index 00000000..d370315a --- /dev/null +++ b/plugins/module_utils/orchestrators/manage_fabric_external.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Type +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_external import FabricExternalConnectivityModel +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricsGet, + EpManageFabricsListGet, + EpManageFabricsPost, + EpManageFabricsPut, + EpManageFabricsDelete, +) + + +class ManageExternalFabricOrchestrator(NDBaseOrchestrator): + model_class: Type[NDBaseModel] = FabricExternalConnectivityModel + + create_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsPost + update_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsPut + delete_endpoint: 
Type[NDEndpointBaseModel] = EpManageFabricsDelete + query_one_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsGet + query_all_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsListGet + + def query_all(self) -> ResponseType: + """ + Custom query_all action to extract 'fabrics' from response, + filtered to only externalConnectivity fabric types. + """ + try: + api_endpoint = self.query_all_endpoint() + result = self.sender.query_obj(api_endpoint.path) + fabrics = result.get("fabrics", []) or [] + return [f for f in fabrics if f.get("management", {}).get("type") == "externalConnectivity"] + except Exception as e: + raise Exception(f"Query all failed: {e}") from e diff --git a/plugins/module_utils/orchestrators/manage_fabric_ibgp.py b/plugins/module_utils/orchestrators/manage_fabric_ibgp.py new file mode 100644 index 00000000..9fb5da78 --- /dev/null +++ b/plugins/module_utils/orchestrators/manage_fabric_ibgp.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Type +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.base import NDBaseOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_ibgp import FabricIbgpModel +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import NDEndpointBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.types import ResponseType +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricsGet, + EpManageFabricsListGet, + EpManageFabricsPost, + EpManageFabricsPut, + EpManageFabricsDelete, +) + + +class 
class ManageIbgpFabricOrchestrator(NDBaseOrchestrator):
    """
    Orchestrator for iBGP VXLAN fabrics on Nexus Dashboard.

    Binds ``FabricIbgpModel`` to the /manage/fabrics endpoints and narrows
    ``query_all()`` results to fabrics whose management type is
    ``vxlanIbgp``.
    """

    model_class: Type[NDBaseModel] = FabricIbgpModel

    create_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsPost
    update_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsPut
    delete_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsDelete
    query_one_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsGet
    query_all_endpoint: Type[NDEndpointBaseModel] = EpManageFabricsListGet

    def query_all(self) -> ResponseType:
        """
        Custom query_all action that extracts 'fabrics' from the response,
        filtered to only vxlanIbgp fabric types.

        ## Raises

        - `RuntimeError` if the underlying query fails for any reason.
          RuntimeError subclasses Exception, so callers that previously
          caught the broad Exception raised here continue to work.
        """
        try:
            api_endpoint = self.query_all_endpoint()
            result = self.sender.query_obj(api_endpoint.path)
            fabrics = result.get("fabrics", []) or []
            # (fabric.get("management") or {}) guards against an explicit
            # "management": null in the response payload, which would make
            # .get("type") raise AttributeError on None.
            return [fabric for fabric in fabrics if (fabric.get("management") or {}).get("type") == "vxlanIbgp"]
        except Exception as error:
            # Re-raise as a specific builtin rather than the broad base
            # Exception (pylint W0719), preserving the original cause.
            raise RuntimeError(f"Query all failed: {error}") from error
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": "community"} + +DOCUMENTATION = r""" +--- +module: nd_manage_fabric_ebgp +version_added: "1.4.0" +short_description: Manage eBGP VXLAN fabrics on Cisco Nexus Dashboard +description: +- Manage eBGP VXLAN fabrics on Cisco Nexus Dashboard (ND). +- It supports creating, updating, replacing, and deleting eBGP VXLAN fabrics. +author: +- Mike Wiebe (@mwiebe) +options: + config: + description: + - The list of eBGP VXLAN fabrics to configure. + type: list + elements: dict + suboptions: + fabric_name: + description: + - The name of the fabric. + - Only letters, numbers, underscores, and hyphens are allowed. + - The O(config.fabric_name) must be defined when creating, updating or deleting a fabric. + type: str + required: true + category: + description: + - The resource category. + type: str + default: fabric + location: + description: + - The geographic location of the fabric. + type: dict + suboptions: + latitude: + description: + - Latitude coordinate of the fabric location (-90 to 90). + type: float + required: true + longitude: + description: + - Longitude coordinate of the fabric location (-180 to 180). + type: float + required: true + license_tier: + description: + - The license tier for the fabric. + type: str + default: premier + choices: [ essentials, advantage, premier ] + alert_suspend: + description: + - The alert suspension state for the fabric. + type: str + default: disabled + choices: [ enabled, disabled ] + telemetry_collection: + description: + - Enable telemetry collection for the fabric. + type: bool + default: false + telemetry_collection_type: + description: + - The telemetry collection type. + type: str + default: outOfBand + telemetry_streaming_protocol: + description: + - The telemetry streaming protocol. 
+ type: str + default: ipv4 + telemetry_source_interface: + description: + - The telemetry source interface. + type: str + default: "" + telemetry_source_vrf: + description: + - The telemetry source VRF. + type: str + default: "" + security_domain: + description: + - The security domain associated with the fabric. + type: str + default: all + management: + description: + - The eBGP VXLAN management configuration for the fabric. + type: dict + suboptions: + type: + description: + - The fabric management type. Must be C(vxlanEbgp) for eBGP VXLAN fabrics. + type: str + default: vxlanEbgp + choices: [ vxlanEbgp ] + bgp_asn: + description: + - The BGP Autonomous System Number for the fabric. + - Must be a numeric value between 1 and 4294967295, or dotted notation (1-65535.0-65535). + - Optional when O(config.management.bgp_asn_auto_allocation) is C(true). + type: str + bgp_asn_auto_allocation: + description: + - Enable automatic BGP ASN allocation from the O(config.management.bgp_asn_range) pool. + type: bool + default: true + bgp_asn_range: + description: + - The BGP ASN range to use for automatic ASN allocation (e.g. C(65000-65535)). + - Required when O(config.management.bgp_asn_auto_allocation) is C(true). + type: str + bgp_as_mode: + description: + - The BGP AS mode for the fabric. + - C(multiAS) assigns a unique AS number per leaf/border/border gateway (borders and border gateways may share ASN). + - C(sameTierAS) assigns the same AS number within a tier (leafs share one ASN, borders/border gateways share one ASN). + type: str + default: multiAS + choices: [ multiAS, sameTierAS ] + bgp_allow_as_in_num: + description: + - The number of occurrences of the local AS number allowed in the BGP AS-path. + type: int + default: 1 + bgp_max_path: + description: + - The maximum number of BGP equal-cost paths. + type: int + default: 4 + bgp_underlay_failure_protect: + description: + - Enable BGP underlay failure protection. 
+ type: bool + default: false + auto_configure_ebgp_evpn_peering: + description: + - Automatically configure eBGP EVPN overlay peering between leaf and spine switches. + type: bool + default: true + allow_leaf_same_as: + description: + - Allow leaf switches to have the same BGP ASN even when AS mode is Multi-AS. + type: bool + default: false + assign_ipv4_to_loopback0: + description: + - In an IPv6 routed fabric or VXLAN EVPN fabric with IPv6 underlay, assign IPv4 address + used for BGP Router ID to the routing loopback interface. + type: bool + default: true + evpn: + description: + - Enable BGP EVPN as the control plane and VXLAN as the data plane for this fabric. + type: bool + default: true + route_map_tag: + description: + - Tag for Route Map FABRIC-RMAP-REDIST-SUBNET. (Min 0, Max 4294967295). + type: int + default: 12345 + disable_route_map_tag: + description: + - Disable match tag for Route Map FABRIC-RMAP-REDIST-SUBNET. + type: bool + default: false + leaf_bgp_as: + description: + - The BGP AS number for leaf switches. + - Autonomous system number 1-4294967295 or dotted notation 1-65535.0-65535. + type: str + border_bgp_as: + description: + - The BGP AS number for border switches. + - Autonomous system number 1-4294967295 or dotted notation 1-65535.0-65535. + type: str + super_spine_bgp_as: + description: + - The BGP AS number for super-spine switches. + - Autonomous system number 1-4294967295 or dotted notation 1-65535.0-65535. + type: str + site_id: + description: + - The site identifier for EVPN Multi-Site support. + - Defaults to the value of O(config.management.bgp_asn) if not provided. + type: str + default: "" + bgp_loopback_id: + description: + - The underlay routing loopback interface ID (0-1023). + type: int + default: 0 + bgp_loopback_ip_range: + description: + - Typically Loopback0 IP address range. + type: str + default: "10.2.0.0/22" + bgp_loopback_ipv6_range: + description: + - Typically Loopback0 IPv6 address range. 
+ type: str + default: "fd00::a02:0/119" + nve_loopback_id: + description: + - The underlay VTEP loopback ID associated with the NVE interface (0-1023). + type: int + default: 1 + nve_loopback_ip_range: + description: + - Typically Loopback1 IP address range. + type: str + default: "10.3.0.0/22" + nve_loopback_ipv6_range: + description: + - Typically Loopback1 and Anycast Loopback IPv6 address range. + type: str + default: "fd00::a03:0/118" + anycast_loopback_id: + description: + - Underlay anycast loopback ID. Used for vPC peering in VXLANv6 fabrics. + type: int + default: 10 + anycast_rendezvous_point_ip_range: + description: + - Anycast or Phantom RP IP address range. + type: str + default: "10.254.254.0/24" + ipv6_anycast_rendezvous_point_ip_range: + description: + - Anycast RP IPv6 address range. + type: str + default: "fd00::254:254:0/118" + intra_fabric_subnet_range: + description: + - Address range to assign numbered and peer link SVI IPs. + type: str + default: "10.4.0.0/16" + l2_vni_range: + description: + - Overlay network identifier range (minimum 1, maximum 16777214). + type: str + default: "30000-49000" + l3_vni_range: + description: + - Overlay VRF identifier range (minimum 1, maximum 16777214). + type: str + default: "50000-59000" + network_vlan_range: + description: + - Per switch overlay network VLAN range (minimum 2, maximum 4094). + type: str + default: "2300-2999" + vrf_vlan_range: + description: + - Per switch overlay VRF VLAN range (minimum 2, maximum 4094). + type: str + default: "2000-2299" + overlay_mode: + description: + - Overlay mode. VRF/Network configuration using config-profile or CLI. + type: str + default: cli + choices: [ cli, config-profile ] + replication_mode: + description: + - Replication mode for BUM traffic. + type: str + default: multicast + choices: [ multicast, ingress ] + multicast_group_subnet: + description: + - Multicast pool prefix between 8 to 30. 
A multicast group IPv4 from this pool + is used for BUM traffic for each overlay network. + type: str + default: "239.1.1.0/25" + auto_generate_multicast_group_address: + description: + - Generate a new multicast group address from the multicast pool using a round-robin approach. + type: bool + default: false + underlay_multicast_group_address_limit: + description: + - The maximum supported value is 128 for NX-OS version 10.2(1) or earlier + and 512 for versions above 10.2(1). + type: int + default: 128 + choices: [ 128, 512 ] + tenant_routed_multicast: + description: + - Enable overlay IPv4 multicast support in VXLAN fabrics. + type: bool + default: false + tenant_routed_multicast_ipv6: + description: + - Enable overlay IPv6 multicast support in VXLAN fabrics. + type: bool + default: false + first_hop_redundancy_protocol: + description: + - First hop redundancy protocol, HSRP or VRRP. + type: str + default: hsrp + choices: [ hsrp, vrrp ] + rendezvous_point_count: + description: + - Number of spines acting as Rendezvous-Points (RPs). + type: int + default: 2 + choices: [ 2, 4 ] + rendezvous_point_loopback_id: + description: + - The rendezvous point loopback interface ID. + type: int + default: 254 + rendezvous_point_mode: + description: + - Multicast rendezvous point mode. For IPv6 underlay, use C(asm) only. + type: str + default: asm + choices: [ asm, bidir ] + phantom_rendezvous_point_loopback_id1: + description: + - Underlay phantom rendezvous point loopback primary ID for PIM Bi-dir deployments. + type: int + default: 2 + phantom_rendezvous_point_loopback_id2: + description: + - Underlay phantom rendezvous point loopback secondary ID for PIM Bi-dir deployments. + type: int + default: 3 + phantom_rendezvous_point_loopback_id3: + description: + - Underlay phantom rendezvous point loopback tertiary ID for PIM Bi-dir deployments. 
+ type: int + default: 4 + phantom_rendezvous_point_loopback_id4: + description: + - Underlay phantom rendezvous point loopback quaternary ID for PIM Bi-dir deployments. + type: int + default: 5 + l3vni_multicast_group: + description: + - Default underlay multicast group IPv4 address assigned for every overlay VRF. + type: str + default: "239.1.1.0" + l3_vni_ipv6_multicast_group: + description: + - Default underlay multicast group IPv6 address assigned for every overlay VRF. + type: str + default: "ff1e::" + ipv6_multicast_group_subnet: + description: + - IPv6 multicast address with prefix 112 to 128. + type: str + default: "ff1e::/121" + mvpn_vrf_route_import_id: + description: + - Enable MVPN VRI ID generation for tenant routed multicast with IPv4 underlay. + type: bool + default: true + mvpn_vrf_route_import_id_range: + description: + - MVPN VRI ID range (minimum 1, maximum 65535) for vPC, applicable when TRM is enabled + with IPv6 underlay, or O(config.management.mvpn_vrf_route_import_id) is enabled with IPv4 underlay. + type: str + vrf_route_import_id_reallocation: + description: + - One time VRI ID re-allocation based on MVPN VRI ID Range. + type: bool + default: false + target_subnet_mask: + description: + - Mask for underlay subnet IP range (24-31). + type: int + default: 30 + anycast_gateway_mac: + description: + - Shared anycast gateway MAC address for all VTEPs in xxxx.xxxx.xxxx format. + type: str + default: 2020.0000.00aa + fabric_mtu: + description: + - Intra fabric interface MTU. Must be an even number (1500-9216). + type: int + default: 9216 + l2_host_interface_mtu: + description: + - Layer 2 host interface MTU. Must be an even number (1500-9216). + type: int + default: 9216 + l3_vni_no_vlan_default_option: + description: + - L3 VNI configuration without VLAN configuration. This value is propagated on VRF + creation as the default value of Enable L3VNI w/o VLAN in VRF. 
+ type: bool + default: false + underlay_ipv6: + description: + - Enable IPv6 underlay. If not enabled, IPv4 underlay is used. + type: bool + default: false + static_underlay_ip_allocation: + description: + - Disable dynamic underlay IP address allocation. + type: bool + default: false + anycast_border_gateway_advertise_physical_ip: + description: + - Advertise Anycast Border Gateway PIP as VTEP. + Effective on MSD fabric Recalculate Config. + type: bool + default: false + sub_interface_dot1q_range: + description: + - Per aggregation dot1q range for VRF-Lite connectivity (minimum 2, maximum 4093). + type: str + default: "2-511" + vrf_lite_auto_config: + description: + - VRF Lite Inter-Fabric Connection Deployment Options. + - If C(back2BackAndToExternal) is selected, VRF Lite IFCs are auto created between + border devices of two Easy Fabrics, and between border devices in Easy Fabric and + edge routers in External Fabric. + type: str + default: manual + choices: [ manual, back2BackAndToExternal ] + vrf_lite_subnet_range: + description: + - Address range to assign P2P interfabric connections. + type: str + default: "10.33.0.0/16" + vrf_lite_subnet_target_mask: + description: + - VRF Lite subnet mask. + type: int + default: 30 + auto_unique_vrf_lite_ip_prefix: + description: + - When enabled, IP prefix allocated to the VRF LITE IFC is not reused on VRF extension + over VRF LITE IFC. Instead, a unique IP subnet is allocated for each VRF extension. + type: bool + default: false + vpc_domain_id_range: + description: + - vPC domain ID range (minimum 1, maximum 1000) to use for new pairings. + type: str + default: "1-1000" + vpc_peer_link_vlan: + description: + - VLAN range (minimum 2, maximum 4094) for vPC Peer Link SVI. + type: str + default: "3600" + vpc_peer_link_enable_native_vlan: + description: + - Enable vPC peer link for native VLAN. + type: bool + default: false + vpc_peer_keep_alive_option: + description: + - Use vPC peer keep alive with loopback or management. 
+ type: str + default: management + choices: [ loopback, management ] + vpc_auto_recovery_timer: + description: + - vPC auto recovery timer in seconds (240-3600). + type: int + default: 360 + vpc_delay_restore_timer: + description: + - vPC delay restore timer in seconds (1-3600). + type: int + default: 150 + vpc_peer_link_port_channel_id: + description: + - vPC peer link port channel ID (minimum 1, maximum 4096). + type: str + default: "500" + vpc_ipv6_neighbor_discovery_sync: + description: + - Enable IPv6 ND synchronization between vPC peers. + type: bool + default: true + vpc_layer3_peer_router: + description: + - Enable layer-3 peer-router on all leaf switches. + type: bool + default: true + vpc_tor_delay_restore_timer: + description: + - vPC delay restore timer for ToR switches in seconds. + type: int + default: 30 + fabric_vpc_domain_id: + description: + - Enable the same vPC domain ID for all vPC pairs. Not recommended. + type: bool + default: false + shared_vpc_domain_id: + description: + - vPC domain ID to be used on all vPC pairs. + type: int + default: 1 + fabric_vpc_qos: + description: + - QoS on spines for guaranteed delivery of vPC Fabric Peering communication. + type: bool + default: false + fabric_vpc_qos_policy_name: + description: + - QoS policy name. Should be the same on all spines. + type: str + default: spine_qos_for_fabric_vpc_peering + enable_peer_switch: + description: + - Enable the vPC peer-switch feature on ToR switches. + type: bool + default: false + per_vrf_loopback_auto_provision: + description: + - Auto provision an IPv4 loopback on a VTEP on VRF attachment. + - Enabling this option auto-provisions loopback on existing VRF attachments and also + when Edit, QuickAttach, or Multiattach actions are performed. + type: bool + default: false + per_vrf_loopback_ip_range: + description: + - Prefix pool to assign IPv4 addresses to loopbacks on VTEPs on a per VRF basis. 
+ type: str + default: "10.5.0.0/22" + per_vrf_loopback_auto_provision_ipv6: + description: + - Auto provision an IPv6 loopback on a VTEP on VRF attachment. + type: bool + default: false + per_vrf_loopback_ipv6_range: + description: + - Prefix pool to assign IPv6 addresses to loopbacks on VTEPs on a per VRF basis. + type: str + default: "fd00::a05:0/112" + vrf_template: + description: + - Default overlay VRF template for leafs. + type: str + default: Default_VRF_Universal + network_template: + description: + - Default overlay network template for leafs. + type: str + default: Default_Network_Universal + vrf_extension_template: + description: + - Default overlay VRF template for borders. + type: str + default: Default_VRF_Extension_Universal + network_extension_template: + description: + - Default overlay network template for borders. + type: str + default: Default_Network_Extension_Universal + performance_monitoring: + description: + - If enabled, switch metrics are collected through periodic SNMP polling. + Alternative to real-time telemetry. + type: bool + default: false + tenant_dhcp: + description: + - Enable tenant DHCP. + type: bool + default: true + advertise_physical_ip: + description: + - For primary VTEP IP advertisement as next-hop of prefix routes. + type: bool + default: false + advertise_physical_ip_on_border: + description: + - Enable advertise-pip on vPC borders and border gateways only. + Applicable only when vPC advertise-pip is not enabled. + type: bool + default: true + bgp_authentication: + description: + - Enable BGP authentication. + type: bool + default: false + bgp_authentication_key_type: + description: + - BGP key encryption type. 3 - 3DES, 6 - Cisco type 6, 7 - Cisco type 7. + type: str + default: 3des + choices: [ 3des, type6, type7 ] + bgp_authentication_key: + description: + - Encrypted BGP authentication key based on type. + type: str + default: "" + bfd: + description: + - Enable BFD. Valid for IPv4 underlay only. 
+ type: bool + default: false + bfd_ibgp: + description: + - Enable BFD for iBGP. + type: bool + default: false + bfd_authentication: + description: + - Enable BFD authentication. Valid for P2P interfaces only. + type: bool + default: false + bfd_authentication_key_id: + description: + - BFD authentication key ID. + type: int + default: 100 + bfd_authentication_key: + description: + - Encrypted SHA1 secret value. + type: str + default: "" + pim_hello_authentication: + description: + - Enable PIM hello authentication. Valid for IPv4 underlay only. + type: bool + default: false + pim_hello_authentication_key: + description: + - PIM hello authentication key. 3DES encrypted. + type: str + default: "" + nxapi: + description: + - Enable NX-API over HTTPS. + type: bool + default: false + nxapi_http: + description: + - Enable NX-API over HTTP. + type: bool + default: false + nxapi_https_port: + description: + - HTTPS port for NX-API (1-65535). + type: int + default: 443 + nxapi_http_port: + description: + - HTTP port for NX-API (1-65535). + type: int + default: 80 + day0_bootstrap: + description: + - Automatic IP assignment for POAP. + type: bool + default: false + bootstrap_subnet_collection: + description: + - List of IPv4 or IPv6 subnets to be used for bootstrap. + - When O(state=merged), omitting this option preserves the existing collection. + - When O(state=merged), providing this option replaces the entire collection with the supplied list. + - Under O(state=merged), entries in this list are not merged item-by-item. + - Under O(state=merged), removing one entry from the playbook removes it from the fabric, and setting an empty list clears the collection. + - When O(state=replaced), this option is also treated as the exact desired collection. + - When O(state=replaced), omitting this option resets the collection to its default empty value. + type: list + elements: dict + suboptions: + start_ip: + description: + - Starting IP address of the bootstrap range. 
+ type: str + required: true + end_ip: + description: + - Ending IP address of the bootstrap range. + type: str + required: true + default_gateway: + description: + - Default gateway for the bootstrap subnet. + type: str + required: true + subnet_prefix: + description: + - Subnet prefix length (8-30). + type: int + required: true + local_dhcp_server: + description: + - Automatic IP assignment for POAP from local DHCP server. + type: bool + default: false + dhcp_protocol_version: + description: + - IP protocol version for local DHCP server. + type: str + default: dhcpv4 + choices: [ dhcpv4, dhcpv6 ] + dhcp_start_address: + description: + - DHCP scope start address for switch POAP. + type: str + default: "" + dhcp_end_address: + description: + - DHCP scope end address for switch POAP. + type: str + default: "" + management_gateway: + description: + - Default gateway for management VRF on the switch. + type: str + default: "" + management_ipv4_prefix: + description: + - Switch management IP subnet prefix for IPv4. + type: int + default: 24 + management_ipv6_prefix: + description: + - Switch management IP subnet prefix for IPv6. + type: int + default: 64 + netflow_settings: + description: + - Netflow configuration settings. + type: dict + suboptions: + netflow: + description: + - Enable netflow collection. + type: bool + default: false + netflow_exporter_collection: + description: + - List of netflow exporters. + type: list + elements: dict + suboptions: + exporter_name: + description: + - Name of the netflow exporter. + type: str + required: true + exporter_ip: + description: + - IP address of the netflow collector. + type: str + required: true + vrf: + description: + - VRF name for the exporter. + type: str + default: management + source_interface_name: + description: + - Source interface name. + type: str + required: true + udp_port: + description: + - UDP port for netflow export (1-65535). 
+ type: int + netflow_record_collection: + description: + - List of netflow records. + type: list + elements: dict + suboptions: + record_name: + description: + - Name of the netflow record. + type: str + required: true + record_template: + description: + - Template type for the record. + type: str + required: true + layer2_record: + description: + - Enable layer 2 record fields. + type: bool + default: false + netflow_monitor_collection: + description: + - List of netflow monitors. + type: list + elements: dict + suboptions: + monitor_name: + description: + - Name of the netflow monitor. + type: str + required: true + record_name: + description: + - Associated record name. + type: str + required: true + exporter1_name: + description: + - Primary exporter name. + type: str + required: true + exporter2_name: + description: + - Secondary exporter name. + type: str + default: "" + real_time_backup: + description: + - Backup hourly only if there is any config deployment since last backup. + type: bool + scheduled_backup: + description: + - Enable backup at the specified time daily. + type: bool + scheduled_backup_time: + description: + - Time (UTC) in 24 hour format to take a daily backup if enabled (00:00 to 23:59). + type: str + default: "" + leaf_tor_id_range: + description: + - Use specific vPC/Port-channel ID range for leaf-tor pairings. + type: bool + default: false + leaf_tor_vpc_port_channel_id_range: + description: + - vPC/Port-channel ID range (minimum 1, maximum 4096), used for auto-allocating + vPC/Port-Channel IDs for leaf-tor pairings. + type: str + default: "1-499" + allow_vlan_on_leaf_tor_pairing: + description: + - Set trunk allowed VLAN to none or all for leaf-tor pairing port-channels. + type: str + default: none + choices: [ none, all ] + ntp_server_collection: + description: + - List of NTP server IPv4/IPv6 addresses and/or hostnames. + type: list + elements: str + ntp_server_vrf_collection: + description: + - NTP Server VRFs. 
One VRF for all NTP servers or a list of VRFs, one per NTP server. + type: list + elements: str + dns_collection: + description: + - List of IPv4 and IPv6 DNS addresses. + type: list + elements: str + dns_vrf_collection: + description: + - DNS Server VRFs. One VRF for all DNS servers or a list of VRFs, one per DNS server. + type: list + elements: str + syslog_server_collection: + description: + - List of syslog server IPv4/IPv6 addresses and/or hostnames. + type: list + elements: str + syslog_server_vrf_collection: + description: + - Syslog Server VRFs. One VRF for all syslog servers or a list of VRFs, one per syslog server. + type: list + elements: str + syslog_severity_collection: + description: + - List of syslog severity values, one per syslog server. + type: list + elements: int + banner: + description: + - Message of the Day (motd) banner. Delimiter char (very first char is delimiter char) + followed by message ending with delimiter. + type: str + default: "" + extra_config_leaf: + description: + - Additional CLIs added after interface configurations for all switches with a VTEP + unless they have some spine role. + type: str + default: "" + extra_config_spine: + description: + - Additional CLIs added after interface configurations for all switches with some spine role. + type: str + default: "" + extra_config_tor: + description: + - Additional CLIs added after interface configurations for all ToRs. + type: str + default: "" + extra_config_intra_fabric_links: + description: + - Additional CLIs for all intra-fabric links. + type: str + default: "" + extra_config_aaa: + description: + - AAA configurations. + type: str + default: "" + extra_config_nxos_bootstrap: + description: + - Additional CLIs required during device bootup/login e.g. AAA/Radius. + type: str + default: "" + aaa: + description: + - Include AAA configs from Manageability tab during device bootup. 
+ type: bool + default: false + pre_interface_config_leaf: + description: + - Additional CLIs added before interface configurations for all switches with a VTEP + unless they have some spine role. + type: str + default: "" + pre_interface_config_spine: + description: + - Additional CLIs added before interface configurations for all switches with some spine role. + type: str + default: "" + pre_interface_config_tor: + description: + - Additional CLIs added before interface configurations for all ToRs. + type: str + default: "" + greenfield_debug_flag: + description: + - Allow switch configuration to be cleared without a reload when preserveConfig is set to false. + type: str + default: disable + choices: [ enable, disable ] + interface_statistics_load_interval: + description: + - Interface statistics load interval in seconds. + type: int + default: 10 + nve_hold_down_timer: + description: + - NVE source interface hold-down time in seconds. + type: int + default: 180 + next_generation_oam: + description: + - Enable the Next Generation (NG) OAM feature for all switches in the fabric + to aid in troubleshooting VXLAN EVPN fabrics. + type: bool + default: true + ngoam_south_bound_loop_detect: + description: + - Enable the Next Generation (NG) OAM southbound loop detection. + type: bool + default: false + ngoam_south_bound_loop_detect_probe_interval: + description: + - Next Generation (NG) OAM southbound loop detection probe interval in seconds. + type: int + default: 300 + ngoam_south_bound_loop_detect_recovery_interval: + description: + - Next Generation (NG) OAM southbound loop detection recovery interval in seconds. + type: int + default: 600 + strict_config_compliance_mode: + description: + - Enable bi-directional compliance checks to flag additional configs in the running + config that are not in the intent/expected config. + type: bool + default: false + advanced_ssh_option: + description: + - Enable AAA IP Authorization. 
Enable only when IP Authorization is enabled + in the AAA Server. + type: bool + default: false + copp_policy: + description: + - Fabric wide CoPP policy. Customized CoPP policy should be provided when C(manual) is selected. + type: str + default: strict + choices: [ dense, lenient, moderate, strict, manual ] + power_redundancy_mode: + description: + - Default power supply mode for NX-OS switches. + type: str + default: redundant + choices: [ redundant, combined, inputSrcRedundant ] + heartbeat_interval: + description: + - XConnect heartbeat interval for periodic link status checks. + type: int + default: 190 + snmp_trap: + description: + - Configure ND as a receiver for SNMP traps. + type: bool + default: true + cdp: + description: + - Enable CDP on management interface. + type: bool + default: false + real_time_interface_statistics_collection: + description: + - Enable real time interface statistics collection. Valid for NX-OS only. + type: bool + default: false + tcam_allocation: + description: + - TCAM commands are automatically generated for VxLAN and vPC Fabric Peering when enabled. + type: bool + default: true + allow_smart_switch_onboarding: + description: + - Enable onboarding of smart switches to Hypershield for firewall service. + type: bool + default: false + default_queuing_policy: + description: + - Enable default queuing policies. + type: bool + default: false + default_queuing_policy_cloudscale: + description: + - Queuing policy for all 92xx, -EX, -FX, -FX2, -FX3, -GX series switches in the fabric. + type: str + default: queuing_policy_default_8q_cloudscale + default_queuing_policy_r_series: + description: + - Queueing policy for all Nexus R-series switches. + type: str + default: queuing_policy_default_r_series + default_queuing_policy_other: + description: + - Queuing policy for all other switches in the fabric. 
+ type: str + default: queuing_policy_default_other + aiml_qos: + description: + - Configures QoS and Queuing Policies specific to N9K Cloud Scale (CS) and + Silicon One (S1) switch fabric for AI network workloads. + type: bool + default: false + aiml_qos_policy: + description: + - Queuing policy based on predominant fabric link speed. + C(User-defined) allows for custom configuration. + type: str + default: 400G + choices: [ 800G, 400G, 100G, 25G, User-defined ] + roce_v2: + description: + - DSCP for RDMA traffic. Numeric (0-63) with ranges/comma, or named values + (af11, af12, af13, af21, af22, af23, af31, af32, af33, af41, af42, af43, + cs1, cs2, cs3, cs4, cs5, cs6, cs7, default, ef). + type: str + default: "26" + cnp: + description: + - DSCP value for Congestion Notification. Numeric (0-63) with ranges/comma, or named values + (af11, af12, af13, af21, af22, af23, af31, af32, af33, af41, af42, af43, + cs1, cs2, cs3, cs4, cs5, cs6, cs7, default, ef). + type: str + default: "48" + wred_min: + description: + - WRED minimum threshold in kbytes. + type: int + default: 950 + wred_max: + description: + - WRED maximum threshold in kbytes. + type: int + default: 3000 + wred_drop_probability: + description: + - WRED drop probability percentage. + type: int + default: 7 + wred_weight: + description: + - Influences how quickly WRED reacts to queue depth changes. + type: int + default: 0 + bandwidth_remaining: + description: + - Percentage of remaining bandwidth allocated to AI traffic queues. + type: int + default: 50 + dlb: + description: + - Enables fabric-level Dynamic Load Balancing (DLB) configuration. + Inter-Switch-Links (ISL) will be configured as DLB interfaces. + type: bool + default: false + dlb_mode: + description: + - Select system-wide flowlet, per-packet (packet spraying) or policy driven mixed mode. + Mixed mode is supported on Silicon One (S1) platform only. 
+ type: str + default: flowlet + choices: [ flowlet, per-packet, policy-driven-flowlet, policy-driven-per-packet, policy-driven-mixed-mode ] + dlb_mixed_mode_default: + description: + - Default load balancing mode for policy driven mixed mode DLB. + type: str + default: ecmp + choices: [ ecmp, flowlet, per-packet ] + flowlet_aging: + description: + - Flowlet aging timer in microseconds. Valid range depends on platform. + Cloud Scale (CS) 1-2000000 (default 500), Silicon One (S1) 1-1024 (default 256). + type: int + flowlet_dscp: + description: + - DSCP values for flowlet load balancing. Numeric (0-63) with ranges/comma, or named values + (af11, af12, af13, af21, af22, af23, af31, af32, af33, af41, af42, af43, + cs1, cs2, cs3, cs4, cs5, cs6, cs7, default, ef). + type: str + default: "" + per_packet_dscp: + description: + - DSCP values for per-packet load balancing. Numeric (0-63) with ranges/comma, or named values + (af11, af12, af13, af21, af22, af23, af31, af32, af33, af41, af42, af43, + cs1, cs2, cs3, cs4, cs5, cs6, cs7, default, ef). + type: str + default: "" + ai_load_sharing: + description: + - Enable IP load sharing using source and destination address for AI workloads. + type: bool + default: false + priority_flow_control_watch_interval: + description: + - Acceptable values from 101 to 1000 (milliseconds). + Leave blank for system default (100ms). + type: int + ptp: + description: + - Enable Precision Time Protocol (PTP). + type: bool + default: false + ptp_loopback_id: + description: + - Precision Time Protocol source loopback ID. + type: int + default: 0 + ptp_domain_id: + description: + - Multiple independent PTP clocking subdomains on a single network. + type: int + default: 0 + private_vlan: + description: + - Enable PVLAN on switches except spines and super spines. + type: bool + default: false + default_private_vlan_secondary_network_template: + description: + - Default PVLAN secondary network template. 
+ type: str + default: Pvlan_Secondary_Network + macsec: + description: + - Enable MACsec in the fabric. MACsec fabric parameters are used for configuring + MACsec on a fabric link if MACsec is enabled on the link. + type: bool + default: false + macsec_cipher_suite: + description: + - Configure MACsec cipher suite. + type: str + default: GCM-AES-XPN-256 + choices: [ GCM-AES-128, GCM-AES-256, GCM-AES-XPN-128, GCM-AES-XPN-256 ] + macsec_key_string: + description: + - MACsec primary key string. Cisco Type 7 encrypted octet string. + type: str + default: "" + macsec_algorithm: + description: + - MACsec primary cryptographic algorithm. AES_128_CMAC or AES_256_CMAC. + type: str + default: AES_128_CMAC + choices: [ AES_128_CMAC, AES_256_CMAC ] + macsec_fallback_key_string: + description: + - MACsec fallback key string. Cisco Type 7 encrypted octet string. + type: str + default: "" + macsec_fallback_algorithm: + description: + - MACsec fallback cryptographic algorithm. AES_128_CMAC or AES_256_CMAC. + type: str + default: AES_128_CMAC + choices: [ AES_128_CMAC, AES_256_CMAC ] + macsec_report_timer: + description: + - MACsec operational status periodic report timer in minutes. + type: int + default: 5 + enable_dpu_pinning: + description: + - Enable pinning of VRFs and networks to specific DPUs on smart switches. + type: bool + default: false + connectivity_domain_name: + description: + - Domain name to connect to Hypershield. + type: str + hypershield_connectivity_proxy_server: + description: + - IPv4 address, IPv6 address, or DNS name of the proxy server for Hypershield communication. + type: str + hypershield_connectivity_proxy_server_port: + description: + - Proxy port number for communication with Hypershield. + type: int + hypershield_connectivity_source_intf: + description: + - Loopback interface on smart switch for communication with Hypershield. + type: str + telemetry_settings: + description: + - Telemetry configuration settings. 
+ type: dict + suboptions: + flow_collection: + description: + - Flow collection settings. + type: dict + suboptions: + traffic_analytics: + description: + - Traffic analytics state. + type: str + default: enabled + traffic_analytics_scope: + description: + - Traffic analytics scope. + type: str + default: intraFabric + operating_mode: + description: + - Operating mode. + type: str + default: flowTelemetry + udp_categorization: + description: + - UDP categorization. + type: str + default: enabled + microburst: + description: + - Microburst detection settings. + type: dict + suboptions: + microburst: + description: + - Enable microburst detection. + type: bool + default: false + sensitivity: + description: + - Microburst sensitivity level. + type: str + default: low + analysis_settings: + description: + - Telemetry analysis settings. + type: dict + suboptions: + is_enabled: + description: + - Enable telemetry analysis. + type: bool + default: false + nas: + description: + - NAS telemetry configuration. + type: dict + suboptions: + server: + description: + - NAS server address. + type: str + default: "" + export_settings: + description: + - NAS export settings. + type: dict + suboptions: + export_type: + description: + - Export type. + type: str + default: full + export_format: + description: + - Export format. + type: str + default: json + energy_management: + description: + - Energy management settings. + type: dict + suboptions: + cost: + description: + - Energy cost per unit. + type: float + default: 1.2 + external_streaming_settings: + description: + - External streaming settings. + type: dict + suboptions: + email: + description: + - Email streaming configuration. + type: list + elements: dict + message_bus: + description: + - Message bus configuration. + type: list + elements: dict + syslog: + description: + - Syslog streaming configuration. + type: dict + webhooks: + description: + - Webhook configuration. 
+ type: list + elements: dict + state: + description: + - The desired state of the fabric resources on the Cisco Nexus Dashboard. + - Use O(state=merged) to create new fabrics and update existing ones as defined in the configuration. + Resources on ND that are not specified in the configuration will be left unchanged. + - Use O(state=replaced) to replace the fabric configuration specified in the configuration. + Any settings not explicitly provided will revert to their defaults. + - Use O(state=overridden) to enforce the configuration as the single source of truth. + Any fabric existing on ND but not present in the configuration will be deleted. Use with extra caution. + - Use O(state=deleted) to remove the fabrics specified in the configuration from the Cisco Nexus Dashboard. + type: str + default: merged + choices: [ merged, replaced, overridden, deleted ] +extends_documentation_fragment: +- cisco.nd.modules +- cisco.nd.check_mode +notes: +- This module is only supported on Nexus Dashboard having version 4.1.0 or higher. +- Only eBGP VXLAN fabric type (C(vxlanEbgp)) is supported by this module. +- When using O(state=replaced) with only required fields, all optional management settings revert to their defaults. +- The O(config.management.bgp_asn) field is optional when O(config.management.bgp_asn_auto_allocation) is C(true). +- The O(config.management.bgp_asn) field is required when O(config.management.bgp_asn_auto_allocation) is C(false). +- O(config.management.site_id) defaults to the value of O(config.management.bgp_asn) if not provided. +- The default O(config.management.vpc_peer_keep_alive_option) for eBGP fabrics is C(management), unlike iBGP fabrics. 
+""" + +EXAMPLES = r""" +- name: Create an eBGP VXLAN fabric using state merged (with auto ASN allocation) + cisco.nd.nd_manage_fabric_ebgp: + state: merged + config: + - fabric_name: my_ebgp_fabric + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn_auto_allocation: true + bgp_asn_range: "65000-65535" + bgp_as_mode: multiAS + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00aa" + performance_monitoring: false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: management + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: false + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: disable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + bgp_loopback_ip_range: "10.2.0.0/22" + nve_loopback_ip_range: "10.3.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.254.0/24" + intra_fabric_subnet_range: "10.4.0.0/16" + l2_vni_range: "30000-49000" + 
l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.33.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.5.0.0/22" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: result + +- name: Create an eBGP VXLAN fabric with a static BGP ASN + cisco.nd.nd_manage_fabric_ebgp: + state: merged + config: + - fabric_name: my_ebgp_fabric_static + category: fabric + management: + type: vxlanEbgp + bgp_asn: "65001" + bgp_asn_auto_allocation: false + site_id: "65001" + bgp_as_mode: multiAS + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00aa" + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + bgp_loopback_ip_range: "10.2.0.0/22" + nve_loopback_ip_range: "10.3.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.254.0/24" + intra_fabric_subnet_range: "10.4.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + register: result + +- name: Update specific fields on an existing eBGP fabric using state merged (partial update) + cisco.nd.nd_manage_fabric_ebgp: + state: merged + config: + - fabric_name: my_ebgp_fabric + category: fabric + management: + bgp_asn_range: "65100-65199" + anycast_gateway_mac: "2020.0000.00bb" + performance_monitoring: true + register: result + +- name: Create or fully replace an eBGP VXLAN fabric using state replaced + cisco.nd.nd_manage_fabric_ebgp: + state: replaced + config: + - fabric_name: my_ebgp_fabric + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + 
telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn: "65004" + bgp_asn_auto_allocation: false + site_id: "65004" + bgp_as_mode: multiAS + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00dd" + performance_monitoring: true + replication_mode: multicast + multicast_group_subnet: "239.1.3.0/25" + rendezvous_point_count: 3 + rendezvous_point_loopback_id: 253 + vpc_peer_link_vlan: "3700" + vpc_peer_keep_alive_option: management + vpc_auto_recovery_timer: 300 + vpc_delay_restore_timer: 120 + vpc_peer_link_port_channel_id: "600" + advertise_physical_ip: true + vpc_domain_id_range: "1-800" + fabric_mtu: 9000 + l2_host_interface_mtu: 9000 + tenant_dhcp: false + snmp_trap: false + anycast_border_gateway_advertise_physical_ip: true + greenfield_debug_flag: disable + tcam_allocation: false + real_time_interface_statistics_collection: true + interface_statistics_load_interval: 30 + bgp_loopback_ip_range: "10.22.0.0/22" + nve_loopback_ip_range: "10.23.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.252.0/24" + intra_fabric_subnet_range: "10.24.0.0/16" + l2_vni_range: "40000-59000" + l3_vni_range: "60000-69000" + network_vlan_range: "2400-3099" + vrf_vlan_range: "2100-2399" + banner: "^ Managed by Ansible ^" + register: result + +- name: Replace fabric with only required fields (all optional settings revert to defaults) + cisco.nd.nd_manage_fabric_ebgp: + state: replaced + config: + - fabric_name: my_ebgp_fabric + category: fabric + management: + type: vxlanEbgp + bgp_asn: "65004" + bgp_asn_auto_allocation: false + site_id: "65004" + banner: "^ Managed by Ansible ^" + register: result + +- name: Enforce exact fabric inventory using state overridden (deletes unlisted fabrics) + cisco.nd.nd_manage_fabric_ebgp: + state: overridden + config: + - fabric_name: fabric_east + category: fabric + location: + latitude: 40.7128 + longitude: -74.0060 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + 
management: + type: vxlanEbgp + bgp_asn: "65010" + bgp_asn_auto_allocation: false + site_id: "65010" + bgp_as_mode: multiAS + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0010" + replication_mode: multicast + multicast_group_subnet: "239.1.10.0/25" + bgp_loopback_ip_range: "10.10.0.0/22" + nve_loopback_ip_range: "10.11.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.10.0/24" + intra_fabric_subnet_range: "10.12.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + - fabric_name: fabric_west + category: fabric + location: + latitude: 34.0522 + longitude: -118.2437 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn: "65020" + bgp_asn_auto_allocation: false + site_id: "65020" + bgp_as_mode: multiAS + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0020" + replication_mode: multicast + multicast_group_subnet: "239.1.20.0/25" + bgp_loopback_ip_range: "10.20.0.0/22" + nve_loopback_ip_range: "10.21.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.20.0/24" + intra_fabric_subnet_range: "10.22.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + register: result + +- name: Delete a specific eBGP fabric using state deleted + cisco.nd.nd_manage_fabric_ebgp: + state: deleted + config: + - fabric_name: my_ebgp_fabric + register: result + +- name: Delete multiple eBGP fabrics in a single task + cisco.nd.nd_manage_fabric_ebgp: + state: deleted + config: + - fabric_name: fabric_east + - fabric_name: fabric_west + - fabric_name: fabric_old + register: result +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec +from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import 
NDStateMachine +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_ebgp import FabricEbgpModel +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.manage_fabric_ebgp import ManageEbgpFabricOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.common.exceptions import NDStateMachineError + + +def main(): + argument_spec = nd_argument_spec() + argument_spec.update(FabricEbgpModel.get_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + # Initialize StateMachine + nd_state_machine = NDStateMachine( + module=module, + model_orchestrator=ManageEbgpFabricOrchestrator, + ) + + # Manage state + nd_state_machine.manage_state() + + module.exit_json(**nd_state_machine.output.format()) + + except NDStateMachineError as e: + module.fail_json(msg=str(e)) + except Exception as e: + module.fail_json(msg=f"Module execution failed: {str(e)}") + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/nd_manage_fabric_external.py b/plugins/modules/nd_manage_fabric_external.py new file mode 100644 index 00000000..0bed6cc3 --- /dev/null +++ b/plugins/modules/nd_manage_fabric_external.py @@ -0,0 +1,780 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": "community"} + +DOCUMENTATION = r""" +--- +module: nd_manage_fabric_external +version_added: "1.4.0" +short_description: Manage External Connectivity fabrics on Cisco Nexus Dashboard +description: +- Manage External Connectivity fabrics on Cisco Nexus Dashboard (ND). +- It supports creating, updating, replacing, and deleting External Connectivity fabrics. 
+author: +- Mike Wiebe (@mwiebe) +options: + config: + description: + - The list of External Connectivity fabrics to configure. + type: list + elements: dict + suboptions: + fabric_name: + description: + - The name of the fabric. + - Only letters, numbers, underscores, and hyphens are allowed. + - The O(config.fabric_name) must be defined when creating, updating or deleting a fabric. + type: str + required: true + category: + description: + - The resource category. + type: str + default: fabric + location: + description: + - The geographic location of the fabric. + type: dict + suboptions: + latitude: + description: + - Latitude coordinate of the fabric location (-90 to 90). + type: float + required: true + longitude: + description: + - Longitude coordinate of the fabric location (-180 to 180). + type: float + required: true + license_tier: + description: + - License Tier value of a fabric. + type: str + default: premier + choices: [ essentials, advantage, premier ] + alert_suspend: + description: + - Alert Suspend state configured on the fabric. + type: str + default: disabled + choices: [ enabled, disabled ] + telemetry_collection: + description: + - Enable telemetry collection for the fabric. + type: bool + default: false + telemetry_collection_type: + description: + - Telemetry collection method. + type: str + default: outOfBand + choices: [ inBand, outOfBand ] + telemetry_streaming_protocol: + description: + - Telemetry Streaming Protocol. + type: str + default: ipv4 + choices: [ ipv4, ipv6 ] + telemetry_source_interface: + description: + - Telemetry Source Interface (VLAN id or Loopback id) only valid if Telemetry Collection is set to inBand. + type: str + default: "" + telemetry_source_vrf: + description: + - VRF over which telemetry is streamed, valid only if telemetry collection is set to inband. + type: str + default: "" + security_domain: + description: + - Security Domain associated with the fabric. 
+ type: str + default: all + management: + description: + - The External Connectivity management configuration for the fabric. + type: dict + suboptions: + type: + description: + - The fabric management type. Must be C(externalConnectivity) for External Connectivity fabrics. + type: str + default: externalConnectivity + choices: [ externalConnectivity ] + bgp_asn: + description: + - Autonomous system number 1-4294967295 | 1-65535[.0-65535]. + type: str + required: true + aaa: + description: + - Include AAA configs from Advanced tab during device bootup. + type: bool + default: false + advanced_ssh_option: + description: + - Enable only, when IP Authorization is enabled in the AAA Server. + type: bool + default: false + allow_same_loopback_ip_on_switches: + description: + - Allow the same loopback IP address to be configured on multiple switches (e.g. RP loopback IP). + type: bool + default: false + allow_smart_switch_onboarding: + description: + - Enable onboarding of smart switches to Hypershield for firewall service. + type: bool + default: false + bootstrap_subnet_collection: + description: + - List of IPv4 or IPv6 subnets to be used for bootstrap. + - When O(state=merged), omitting this option preserves the existing collection. + - When O(state=merged), providing this option replaces the entire collection with the supplied list. + - Under O(state=merged), entries in this list are not merged item-by-item. + - Under O(state=merged), removing one entry from the playbook removes it from the fabric, and setting an empty list clears the collection. + - When O(state=replaced), this option is also treated as the exact desired collection. + - When O(state=replaced), omitting this option resets the collection to its default empty value. + type: list + elements: dict + suboptions: + start_ip: + description: + - Starting IP address of the bootstrap range. + type: str + required: true + end_ip: + description: + - Ending IP address of the bootstrap range. 
+ type: str + required: true + default_gateway: + description: + - Default gateway for bootstrap subnet. + type: str + required: true + subnet_prefix: + description: + - Subnet prefix length (8-30). + type: int + required: true + cdp: + description: + - Enable CDP on management interface. + type: bool + default: false + copp_policy: + description: + - Fabric wide CoPP policy. + - Customized CoPP policy should be provided when C(manual) is selected. + type: str + default: manual + choices: [ dense, lenient, moderate, strict, manual ] + create_bgp_config: + description: + - Generate BGP configuration for core and edge routers. + type: bool + default: true + day0_bootstrap: + description: + - Support day 0 touchless switch bringup. + type: bool + default: false + day0_plug_and_play: + description: + - Enable Plug n Play for Catalyst 9000 switches. + type: bool + default: false + dhcp_end_address: + description: + - DHCP Scope End Address For Switch POAP. + type: str + default: "" + dhcp_protocol_version: + description: + - IP protocol version for Local DHCP Server. + type: str + default: dhcpv4 + choices: [ dhcpv4, dhcpv6 ] + dhcp_start_address: + description: + - DHCP Scope Start Address For Switch POAP. + type: str + default: "" + dns_collection: + description: + - List of IPv4 and IPv6 DNS addresses. + type: list + elements: str + dns_vrf_collection: + description: + - DNS Server VRFs. + - One VRF for all DNS servers or a list of VRFs, one per DNS server. + type: list + elements: str + domain_name: + description: + - Domain name for DHCP server PnP block. + type: str + default: "" + enable_dpu_pinning: + description: + - Enable pinning of VRFs and networks to specific DPUs on smart switches. + type: bool + default: false + extra_config_aaa: + description: + - Additional CLIs for AAA Configuration. + type: str + default: "" + extra_config_fabric: + description: + - Additional CLIs for all switches. 
+ type: str + default: "" + extra_config_nxos_bootstrap: + description: + - Additional CLIs required during device bootup/login e.g. AAA/Radius (NX-OS). + type: str + default: "" + extra_config_xe_bootstrap: + description: + - Additional CLIs required during device bootup/login e.g. AAA/Radius (IOS-XE). + type: str + default: "" + inband_day0_bootstrap: + description: + - Support day 0 touchless switch bringup via inband management. + type: bool + default: false + inband_management: + description: + - Import switches with reachability over the switch front-panel ports. + type: bool + default: false + interface_statistics_load_interval: + description: + - Interface Statistics Load Interval Time in seconds. + type: int + default: 10 + local_dhcp_server: + description: + - Automatic IP Assignment For POAP from Local DHCP Server. + type: bool + default: false + management_gateway: + description: + - Default Gateway For Management VRF On The Switch. + type: str + default: "" + management_ipv4_prefix: + description: + - Switch Mgmt IP Subnet Prefix if ipv4. + type: int + default: 24 + management_ipv6_prefix: + description: + - Switch Management IP Subnet Prefix if ipv6. + type: int + default: 64 + monitored_mode: + description: + - If enabled, fabric is only monitored. + - No configuration will be deployed. + type: bool + default: false + mpls_handoff: + description: + - Enable MPLS Handoff. + type: bool + default: false + mpls_loopback_identifier: + description: + - Underlay MPLS Loopback Identifier. + type: int + mpls_loopback_ip_range: + description: + - MPLS Loopback IP Address Range. + type: str + default: "10.102.0.0/25" + netflow_settings: + description: + - Settings associated with netflow. + type: dict + suboptions: + netflow: + description: + - Enable netflow collection. + type: bool + default: false + netflow_exporter_collection: + description: + - List of netflow exporters. 
+ type: list + elements: dict + suboptions: + exporter_name: + description: + - Name of the netflow exporter. + type: str + required: true + exporter_ip: + description: + - IP address of the netflow collector. + type: str + required: true + vrf: + description: + - VRF name for the exporter. + type: str + default: management + source_interface_name: + description: + - Source interface name. + type: str + required: true + udp_port: + description: + - UDP port for netflow export (1-65535). + type: int + netflow_record_collection: + description: + - List of netflow records. + type: list + elements: dict + suboptions: + record_name: + description: + - Name of the netflow record. + type: str + required: true + record_template: + description: + - Template type for the record. + type: str + required: true + layer2_record: + description: + - Enable layer 2 record fields. + type: bool + default: false + netflow_monitor_collection: + description: + - List of netflow monitors. + type: list + elements: dict + suboptions: + monitor_name: + description: + - Name of the netflow monitor. + type: str + required: true + record_name: + description: + - Associated record name. + type: str + required: true + exporter1_name: + description: + - Primary exporter name. + type: str + required: true + exporter2_name: + description: + - Secondary exporter name. + type: str + default: "" + nxapi: + description: + - Enable NX-API over HTTPS. + type: bool + default: false + nxapi_http: + description: + - Enable NX-API over HTTP. + type: bool + default: false + nxapi_http_port: + description: + - HTTP port for NX-API (1-65535). + type: int + default: 80 + nxapi_https_port: + description: + - HTTPS port for NX-API (1-65535). + type: int + default: 443 + performance_monitoring: + description: + - If enabled, switch metrics are collected through periodic SNMP polling. + - Alternative to real-time telemetry. 
+ type: bool + default: false + power_redundancy_mode: + description: + - Default Power Supply Mode for NX-OS Switches. + type: str + default: redundant + choices: [ redundant, combined, inputSrcRedundant ] + ptp: + description: + - Enable Precision Time Protocol (PTP). + type: bool + default: false + ptp_domain_id: + description: + - Multiple Independent PTP Clocking Subdomains on a Single Network. + type: int + default: 0 + ptp_loopback_id: + description: + - Precision Time Protocol Source Loopback Id. + type: int + default: 0 + real_time_backup: + description: + - Hourly Fabric Backup only if there is any config deployment since last backup. + type: bool + real_time_interface_statistics_collection: + description: + - Enable Real Time Interface Statistics Collection. + - Valid for NX-OS only. + type: bool + default: false + scheduled_backup: + description: + - Enable backup at the specified time daily. + type: bool + scheduled_backup_time: + description: + - Time (UTC) in 24 hour format to take a daily backup if enabled (00:00 to 23:59). + type: str + default: "" + snmp_trap: + description: + - Configure Nexus Dashboard as a receiver for SNMP traps. + type: bool + default: true + sub_interface_dot1q_range: + description: + - Per aggregation dot1q range for VRF-Lite connectivity (minimum 2, maximum 4093). + type: str + default: "2-511" + connectivity_domain_name: + description: + - Domain name to connect to Hypershield. + type: str + hypershield_connectivity_proxy_server: + description: + - IPv4 address, IPv6 address, or DNS name of the proxy server for Hypershield communication. + type: str + hypershield_connectivity_proxy_server_port: + description: + - Proxy port number for communication with Hypershield. + type: int + hypershield_connectivity_source_intf: + description: + - Loopback interface on smart switch for communication with Hypershield. + type: str + telemetry_settings: + description: + - Telemetry configuration for the fabric. 
+ type: dict + suboptions: + flow_collection: + description: + - Flow collection settings. + type: dict + suboptions: + traffic_analytics: + description: + - Traffic analytics state. + type: str + default: enabled + traffic_analytics_scope: + description: + - Traffic analytics scope. + type: str + default: intraFabric + operating_mode: + description: + - Operating mode. + type: str + default: flowTelemetry + udp_categorization: + description: + - UDP categorization. + type: str + default: enabled + microburst: + description: + - Microburst detection settings. + type: dict + suboptions: + microburst: + description: + - Enable microburst detection. + type: bool + default: false + sensitivity: + description: + - Microburst sensitivity level. + type: str + default: low + analysis_settings: + description: + - Analysis settings. + type: dict + suboptions: + is_enabled: + description: + - Enable telemetry analysis. + type: bool + default: false + nas: + description: + - NAS telemetry configuration. + type: dict + suboptions: + server: + description: + - NAS server address. + type: str + default: "" + export_settings: + description: + - NAS export settings. + type: dict + suboptions: + export_type: + description: + - Export type. + type: str + default: full + export_format: + description: + - Export format. + type: str + default: json + energy_management: + description: + - Energy management settings. + type: dict + suboptions: + cost: + description: + - Energy cost per unit. + type: float + default: 1.2 + external_streaming_settings: + description: + - External streaming settings for the fabric. + type: dict + suboptions: + email: + description: + - Email streaming configuration. + type: list + elements: dict + message_bus: + description: + - Message bus configuration. + type: list + elements: dict + syslog: + description: + - Syslog streaming configuration. + type: dict + webhooks: + description: + - Webhook configuration. 
+ type: list + elements: dict + state: + description: + - The desired state of the fabric resources on the Cisco Nexus Dashboard. + - Use O(state=merged) to create new fabrics and update existing ones as defined in the configuration. + Resources on ND that are not specified in the configuration will be left unchanged. + - Use O(state=replaced) to replace the fabric configuration specified in the configuration. + Any settings not explicitly provided will revert to their defaults. + - Use O(state=overridden) to enforce the configuration as the single source of truth. + Any fabric existing on ND but not present in the configuration will be deleted. Use with extra caution. + - Use O(state=deleted) to remove the fabrics specified in the configuration from the Cisco Nexus Dashboard. + type: str + default: merged + choices: [ merged, replaced, overridden, deleted ] +extends_documentation_fragment: +- cisco.nd.modules +- cisco.nd.check_mode +notes: +- This module is only supported on Nexus Dashboard having version 4.1.0 or higher. +- Only External Connectivity fabric type (C(externalConnectivity)) is supported by this module. +- When using O(state=replaced) with only required fields, all optional management settings revert to their defaults. +- The O(config.management.bgp_asn) field is required when creating a fabric. 
+""" + +EXAMPLES = r""" +- name: Create an External Connectivity fabric using state merged + cisco.nd.nd_manage_fabric_external: + state: merged + config: + - fabric_name: my_ext_fabric + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65001" + copp_policy: manual + create_bgp_config: true + cdp: false + snmp_trap: true + nxapi: false + nxapi_http: false + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: false + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + sub_interface_dot1q_range: "2-511" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: result + +- name: Update specific fields on an existing fabric using state merged (partial update) + cisco.nd.nd_manage_fabric_external: + state: merged + config: + - fabric_name: my_ext_fabric + category: fabric + management: + bgp_asn: "65002" + performance_monitoring: true + snmp_trap: false + register: result + +- name: Create or fully replace an External Connectivity fabric using state replaced + cisco.nd.nd_manage_fabric_external: + state: replaced + config: + - fabric_name: my_ext_fabric + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65004" + copp_policy: strict + create_bgp_config: true + cdp: true + snmp_trap: false + nxapi: true + nxapi_http: true + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: true + real_time_interface_statistics_collection: true + interface_statistics_load_interval: 30 + sub_interface_dot1q_range: "2-511" 
+ power_redundancy_mode: combined + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + management_ipv6_prefix: 64 + register: result + +- name: Replace fabric with only required fields (all optional settings revert to defaults) + cisco.nd.nd_manage_fabric_external: + state: replaced + config: + - fabric_name: my_ext_fabric + category: fabric + management: + type: externalConnectivity + bgp_asn: "65004" + register: result + +- name: Delete a specific fabric using state deleted + cisco.nd.nd_manage_fabric_external: + state: deleted + config: + - fabric_name: my_ext_fabric + register: result + +- name: Delete multiple fabrics in a single task + cisco.nd.nd_manage_fabric_external: + state: deleted + config: + - fabric_name: ext_fabric_east + - fabric_name: ext_fabric_west + register: result +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec +from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import NDStateMachine +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_external import FabricExternalConnectivityModel +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.manage_fabric_external import ManageExternalFabricOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.common.exceptions import NDStateMachineError + + +def main(): + argument_spec = nd_argument_spec() + argument_spec.update(FabricExternalConnectivityModel.get_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + # Initialize StateMachine + nd_state_machine = NDStateMachine( + module=module, + model_orchestrator=ManageExternalFabricOrchestrator, + ) + + # Manage state + nd_state_machine.manage_state() + + 
module.exit_json(**nd_state_machine.output.format()) + + except NDStateMachineError as e: + module.fail_json(msg=str(e)) + except Exception as e: + module.fail_json(msg=f"Module execution failed: {str(e)}") + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/nd_manage_fabric_ibgp.py b/plugins/modules/nd_manage_fabric_ibgp.py new file mode 100644 index 00000000..61ac1f0d --- /dev/null +++ b/plugins/modules/nd_manage_fabric_ibgp.py @@ -0,0 +1,1888 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": "community"} + +DOCUMENTATION = r""" +--- +module: nd_manage_fabric_ibgp +version_added: "1.4.0" +short_description: Manage iBGP VXLAN fabrics on Cisco Nexus Dashboard +description: +- Manage iBGP VXLAN fabrics on Cisco Nexus Dashboard (ND). +- It supports creating, updating, replacing, and deleting iBGP VXLAN fabrics. +author: +- Mike Wiebe (@mwiebe) +options: + config: + description: + - The list of iBGP VXLAN fabrics to configure. + type: list + elements: dict + suboptions: + fabric_name: + description: + - The name of the fabric. + - Only letters, numbers, underscores, and hyphens are allowed. + - The O(config.fabric_name) must be defined when creating, updating or deleting a fabric. + type: str + required: true + category: + description: + - The resource category. + type: str + default: fabric + location: + description: + - The geographic location of the fabric. + type: dict + suboptions: + latitude: + description: + - Latitude coordinate of the fabric location (-90 to 90). + type: float + required: true + longitude: + description: + - Longitude coordinate of the fabric location (-180 to 180). 
+ type: float + required: true + license_tier: + description: + - The license tier for the fabric. + type: str + default: premier + choices: [ essentials, advantage, premier ] + alert_suspend: + description: + - The alert suspension state for the fabric. + type: str + default: disabled + choices: [ enabled, disabled ] + telemetry_collection: + description: + - Enable telemetry collection for the fabric. + type: bool + default: false + telemetry_collection_type: + description: + - The telemetry collection type. + type: str + default: outOfBand + telemetry_streaming_protocol: + description: + - The telemetry streaming protocol. + type: str + default: ipv4 + telemetry_source_interface: + description: + - The telemetry source interface. + type: str + default: "" + telemetry_source_vrf: + description: + - The telemetry source VRF. + type: str + default: "" + security_domain: + description: + - The security domain associated with the fabric. + type: str + default: all + management: + description: + - The iBGP VXLAN management configuration for the fabric. + - Properties are grouped by template section for readability in the module documentation source. + type: dict + suboptions: + # General + type: + description: + - The fabric management type. Must be C(vxlanIbgp) for iBGP VXLAN fabrics. + type: str + default: vxlanIbgp + choices: [ vxlanIbgp ] + bgp_asn: + description: + - The BGP Autonomous System Number for the fabric. + - Accepts a plain integer (1-4294967295) or dotted notation (1-65535.0-65535). + type: str + required: true + underlay_ipv6: + description: + - Enable IPv6 underlay. + type: bool + default: false + fabric_interface_type: + description: + - The fabric interface type. Numbered (Point-to-Point) or unnumbered. + type: str + default: p2p + choices: [ p2p, unNumbered ] + link_state_routing_protocol: + description: + - The underlay link-state routing protocol. 
+ type: str + default: ospf + choices: [ ospf, isis ] + target_subnet_mask: + description: + - The target subnet mask for intra-fabric links (24-31). + type: int + default: 30 + ipv6_link_local: + description: + - Enable IPv6 link-local addressing. + type: bool + default: true + ipv6_subnet_target_mask: + description: + - The IPv6 subnet target mask. + type: int + default: 126 + route_reflector_count: + description: + - The number of spines acting as BGP route reflectors. + type: int + default: 2 + choices: [ 2, 4 ] + anycast_gateway_mac: + description: + - The anycast gateway MAC address in xxxx.xxxx.xxxx format. + type: str + default: 2020.0000.00aa + performance_monitoring: + description: + - Enable performance monitoring. + type: bool + default: false + + # Replication + replication_mode: + description: + - The multicast replication mode. + type: str + default: multicast + choices: [ multicast, ingress ] + multicast_group_subnet: + description: + - The multicast group subnet. + type: str + default: "239.1.1.0/25" + ipv6_multicast_group_subnet: + description: + - The IPv6 multicast group subnet. + type: str + default: "ff1e::/121" + auto_generate_multicast_group_address: + description: + - Automatically generate multicast group addresses. + type: bool + default: false + underlay_multicast_group_address_limit: + description: + - The underlay multicast group address limit. + - The maximum supported value is 128 for NX-OS version 10.2(1) or earlier and 512 for versions above 10.2(1). + type: int + default: 128 + choices: [ 128, 512 ] + tenant_routed_multicast: + description: + - Enable tenant routed multicast. + type: bool + default: false + tenant_routed_multicast_ipv6: + description: + - Enable tenant routed multicast for IPv6. + type: bool + default: false + rendezvous_point_count: + description: + - The number of spines acting as Rendezvous-Points (RPs). 
+ type: int + default: 2 + choices: [ 2, 4 ] + rendezvous_point_mode: + description: + - Multicast rendezvous point mode. For IPv6 underlay, use C(asm) only. + type: str + default: asm + choices: [ asm, bidir ] + rendezvous_point_loopback_id: + description: + - The rendezvous point loopback interface ID (0-1023). + type: int + default: 254 + phantom_rendezvous_point_loopback_id1: + description: + - Underlay phantom RP loopback primary ID for PIM Bi-dir deployments. + type: int + default: 2 + phantom_rendezvous_point_loopback_id2: + description: + - Underlay phantom RP loopback secondary ID for PIM Bi-dir deployments. + type: int + default: 3 + phantom_rendezvous_point_loopback_id3: + description: + - Underlay phantom RP loopback tertiary ID for PIM Bi-dir deployments. + type: int + default: 4 + phantom_rendezvous_point_loopback_id4: + description: + - Underlay phantom RP loopback quaternary ID for PIM Bi-dir deployments. + type: int + default: 5 + anycast_rendezvous_point_ip_range: + description: + - The anycast rendezvous point IP address pool. + type: str + default: "10.254.254.0/24" + ipv6_anycast_rendezvous_point_ip_range: + description: + - The IPv6 anycast rendezvous point IP address pool. + type: str + default: "fd00::254:254:0/118" + l3vni_multicast_group: + description: + - Default underlay multicast group IPv4 address assigned for every overlay VRF. + type: str + default: "239.1.1.0" + l3_vni_ipv6_multicast_group: + description: + - Default underlay multicast group IPv6 address assigned for every overlay VRF. + type: str + default: "ff1e::" + mvpn_vrf_route_import_id: + description: + - Enable MVPN VRI ID generation for Tenant Routed Multicast with IPv4 underlay. + type: bool + default: true + mvpn_vrf_route_import_id_range: + description: + - MVPN VRI ID range (minimum 1, maximum 65535) for vPC. + - Applicable when TRM is enabled with IPv6 underlay, or mvpn_vrf_route_import_id is enabled with IPv4 underlay. 
+ type: str + default: "" + vrf_route_import_id_reallocation: + description: + - One time VRI ID re-allocation based on MVPN VRI ID Range. + type: bool + default: false + + # vPC + vpc_domain_id_range: + description: + - The vPC domain ID range. + type: str + default: "1-1000" + vpc_peer_link_vlan: + description: + - The vPC peer link VLAN ID. + type: str + default: "3600" + vpc_peer_link_enable_native_vlan: + description: + - Enable native VLAN on the vPC peer link. + type: bool + default: false + vpc_peer_keep_alive_option: + description: + - The vPC peer keep-alive option. + type: str + default: management + choices: [ loopback, management ] + vpc_auto_recovery_timer: + description: + - The vPC auto recovery timer in seconds (240-3600). + type: int + default: 360 + vpc_delay_restore_timer: + description: + - The vPC delay restore timer in seconds (1-3600). + type: int + default: 150 + vpc_peer_link_port_channel_id: + description: + - The vPC peer link port-channel ID. + type: str + default: "500" + vpc_ipv6_neighbor_discovery_sync: + description: + - Enable vPC IPv6 neighbor discovery synchronization. + type: bool + default: true + vpc_layer3_peer_router: + description: + - Enable vPC layer-3 peer router. + type: bool + default: true + vpc_tor_delay_restore_timer: + description: + - The vPC TOR delay restore timer. + type: int + default: 30 + fabric_vpc_domain_id: + description: + - Enable fabric vPC domain ID. + type: bool + default: false + shared_vpc_domain_id: + description: + - The shared vPC domain ID. + type: int + default: 1 + fabric_vpc_qos: + description: + - Enable fabric vPC QoS. + type: bool + default: false + fabric_vpc_qos_policy_name: + description: + - The fabric vPC QoS policy name. + type: str + default: spine_qos_for_fabric_vpc_peering + enable_peer_switch: + description: + - Enable peer switch. + type: bool + default: false + advertise_physical_ip: + description: + - Advertise physical IP address for NVE loopback. 
+ type: bool + default: false + advertise_physical_ip_on_border: + description: + - Advertise physical IP address on border switches. + type: bool + default: true + anycast_border_gateway_advertise_physical_ip: + description: + - Enable anycast border gateway to advertise physical IP. + type: bool + default: false + allow_vlan_on_leaf_tor_pairing: + description: + - "Set trunk allowed VLAN to 'none' or 'all' for leaf-TOR pairing port-channels." + type: str + default: none + choices: [ none, all ] + leaf_tor_id_range: + description: + - Use specific vPC/Port-channel ID range for leaf-TOR pairings. + type: bool + default: false + leaf_tor_vpc_port_channel_id_range: + description: + - Specify vPC/Port-channel ID range (minimum 1, maximum 4096) for leaf-TOR pairings. + type: str + default: "1-499" + + # Protocols + ospf_area_id: + description: + - The OSPF area ID. + type: str + default: "0.0.0.0" + bgp_loopback_id: + description: + - The BGP loopback interface ID (0-1023). + type: int + default: 0 + nve_loopback_id: + description: + - The NVE loopback interface ID (0-1023). + type: int + default: 1 + anycast_loopback_id: + description: + - Underlay Anycast Loopback ID. Used for vPC Peering in VXLANv6 Fabrics. + type: int + default: 10 + auto_bgp_neighbor_description: + description: + - Enable automatic BGP neighbor description. + type: bool + default: true + ibgp_peer_template: + description: + - The iBGP peer template name. + type: str + default: "" + leaf_ibgp_peer_template: + description: + - The leaf iBGP peer template name. + type: str + default: "" + link_state_routing_tag: + description: + - The link state routing tag. + type: str + default: UNDERLAY + bgp_authentication: + description: + - Enable BGP authentication. + type: bool + default: false + bgp_authentication_key_type: + description: + - "BGP key encryption type: 3 - 3DES, 6 - Cisco type 6, 7 - Cisco type 7." 
+ type: str + default: 3des + choices: [ 3des, type6, type7 ] + bgp_authentication_key: + description: + - The BGP authentication key. + type: str + default: "" + bfd: + description: + - Enable BFD globally. + type: bool + default: false + bfd_ibgp: + description: + - Enable BFD for iBGP sessions. + type: bool + default: false + bfd_ospf: + description: + - Enable BFD for OSPF. + type: bool + default: false + bfd_isis: + description: + - Enable BFD for IS-IS. + type: bool + default: false + bfd_pim: + description: + - Enable BFD for PIM. + type: bool + default: false + bfd_authentication: + description: + - Enable BFD authentication. + type: bool + default: false + bfd_authentication_key_id: + description: + - The BFD authentication key ID. + type: int + default: 100 + bfd_authentication_key: + description: + - The BFD authentication key. + type: str + default: "" + ospf_authentication: + description: + - Enable OSPF authentication. + type: bool + default: false + ospf_authentication_key_id: + description: + - The OSPF authentication key ID. + type: int + default: 127 + ospf_authentication_key: + description: + - The OSPF authentication key. + type: str + default: "" + pim_hello_authentication: + description: + - Enable PIM hello authentication. + type: bool + default: false + pim_hello_authentication_key: + description: + - The PIM hello authentication key. + type: str + default: "" + isis_level: + description: + - The IS-IS level. + type: str + default: level-2 + choices: [ level-1, level-2 ] + isis_area_number: + description: + - The IS-IS area number. + type: str + default: "0001" + isis_point_to_point: + description: + - Enable IS-IS point-to-point. + type: bool + default: true + isis_authentication: + description: + - Enable IS-IS authentication. + type: bool + default: false + isis_authentication_keychain_name: + description: + - The IS-IS authentication keychain name. 
+ type: str + default: "" + isis_authentication_keychain_key_id: + description: + - The IS-IS authentication keychain key ID. + type: int + default: 127 + isis_authentication_key: + description: + - The IS-IS authentication key. + type: str + default: "" + isis_overload: + description: + - Enable IS-IS overload bit. + type: bool + default: true + isis_overload_elapse_time: + description: + - The IS-IS overload elapse time in seconds. + type: int + default: 60 + + # Security + security_group_tag: + description: + - Enable Security Group Tag (SGT) support. + type: bool + default: false + security_group_tag_prefix: + description: + - The SGT prefix. + type: str + default: SG_ + security_group_tag_mac_segmentation: + description: + - Enable SGT MAC segmentation. + type: bool + default: false + security_group_tag_id_range: + description: + - The SGT ID range. + type: str + default: "10000-14000" + security_group_tag_preprovision: + description: + - Enable SGT pre-provisioning. + type: bool + default: false + security_group_status: + description: + - The security group status. + type: str + default: disabled + choices: [ enabled, enabledStrict, enabledLoose, enablePending, enablePendingStrict, enablePendingLoose, disablePending, disabled ] + macsec: + description: + - Enable MACsec on intra-fabric links. + type: bool + default: false + macsec_cipher_suite: + description: + - The MACsec cipher suite. + type: str + default: GCM-AES-XPN-256 + choices: [ GCM-AES-128, GCM-AES-256, GCM-AES-XPN-128, GCM-AES-XPN-256 ] + macsec_key_string: + description: + - The MACsec primary key string. + type: str + default: "" + macsec_algorithm: + description: + - The MACsec primary cryptographic algorithm. + type: str + default: AES_128_CMAC + choices: [ AES_128_CMAC, AES_256_CMAC ] + macsec_fallback_key_string: + description: + - The MACsec fallback key string. + type: str + default: "" + macsec_fallback_algorithm: + description: + - The MACsec fallback cryptographic algorithm. 
+ type: str + default: AES_128_CMAC + choices: [ AES_128_CMAC, AES_256_CMAC ] + macsec_report_timer: + description: + - The MACsec report timer. + type: int + default: 5 + vrf_lite_macsec: + description: + - Enable MACsec on DCI links. + type: bool + default: false + vrf_lite_macsec_cipher_suite: + description: + - The DCI MACsec cipher suite. + type: str + default: GCM-AES-XPN-256 + choices: [ GCM-AES-128, GCM-AES-256, GCM-AES-XPN-128, GCM-AES-XPN-256 ] + vrf_lite_macsec_key_string: + description: + - The DCI MACsec primary key string (Cisco Type 7 Encrypted Octet String). + type: str + default: "" + vrf_lite_macsec_algorithm: + description: + - The DCI MACsec primary cryptographic algorithm. + type: str + default: AES_128_CMAC + choices: [ AES_128_CMAC, AES_256_CMAC ] + vrf_lite_macsec_fallback_key_string: + description: + - The DCI MACsec fallback key string (Cisco Type 7 Encrypted Octet String). + - This parameter is used when DCI link has QKD disabled. + type: str + default: "" + vrf_lite_macsec_fallback_algorithm: + description: + - The DCI MACsec fallback cryptographic algorithm. + - This parameter is used when DCI link has QKD disabled. + type: str + default: AES_128_CMAC + choices: [ AES_128_CMAC, AES_256_CMAC ] + quantum_key_distribution: + description: + - Enable quantum key distribution. + type: bool + default: false + quantum_key_distribution_profile_name: + description: + - The quantum key distribution profile name. + type: str + default: "" + key_management_entity_server_ip: + description: + - The key management entity server IP address. + type: str + default: "" + key_management_entity_server_port: + description: + - The key management entity server port. + type: int + default: 0 + trustpoint_label: + description: + - The trustpoint label for TLS authentication. + type: str + default: "" + skip_certificate_verification: + description: + - Skip verification of incoming certificate. 
+ type: bool + default: false + + # Advanced + site_id: + description: + - The site identifier for the fabric (for EVPN Multi-Site support). + - Must be a numeric value between 1 and 281474976710655. + - Defaults to the value of O(config.management.bgp_asn) if not provided. + type: str + default: "" + overlay_mode: + description: + - The overlay configuration mode. + type: str + default: cli + choices: [ cli, config-profile ] + vrf_template: + description: + - The VRF template name. + type: str + default: Default_VRF_Universal + network_template: + description: + - The network template name. + type: str + default: Default_Network_Universal + vrf_extension_template: + description: + - The VRF extension template name. + type: str + default: Default_VRF_Extension_Universal + network_extension_template: + description: + - The network extension template name. + type: str + default: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: + description: + - Enable L3 VNI no-VLAN default option. + type: bool + default: false + fabric_mtu: + description: + - The fabric MTU size (1500-9216). + type: int + default: 9216 + l2_host_interface_mtu: + description: + - The L2 host interface MTU size (1500-9216). + type: int + default: 9216 + tenant_dhcp: + description: + - Enable tenant DHCP. + type: bool + default: true + snmp_trap: + description: + - Enable SNMP traps. + type: bool + default: true + cdp: + description: + - Enable CDP. + type: bool + default: false + tcam_allocation: + description: + - Enable TCAM allocation. + type: bool + default: true + real_time_interface_statistics_collection: + description: + - Enable real-time interface statistics collection. + type: bool + default: false + interface_statistics_load_interval: + description: + - The interface statistics load interval in seconds. + type: int + default: 10 + greenfield_debug_flag: + description: + - Allow switch configuration to be cleared without a reload when preserveConfig is set to false. 
+ type: str + default: disable + choices: [ enable, disable ] + nxapi: + description: + - Enable NX-API (HTTPS). + type: bool + default: false + nxapi_https_port: + description: + - The NX-API HTTPS port (1-65535). + type: int + default: 443 + nxapi_http: + description: + - Enable NX-API over HTTP. + type: bool + default: false + nxapi_http_port: + description: + - The NX-API HTTP port (1-65535). + type: int + default: 80 + default_queuing_policy: + description: + - Enable default queuing policies. + type: bool + default: false + default_queuing_policy_cloudscale: + description: + - Queuing policy for all 92xx, -EX, -FX, -FX2, -FX3, -GX series switches in the fabric. + type: str + default: queuing_policy_default_8q_cloudscale + default_queuing_policy_r_series: + description: + - Queuing policy for all Nexus R-series switches. + type: str + default: queuing_policy_default_r_series + default_queuing_policy_other: + description: + - Queuing policy for all other switches in the fabric. + type: str + default: queuing_policy_default_other + aiml_qos: + description: + - Enable AI/ML QoS. Configures QoS and queuing policies specific to N9K Cloud Scale and Silicon One switch fabric + for AI network workloads. + type: bool + default: false + aiml_qos_policy: + description: + - Queuing policy based on predominant fabric link speed. + type: str + default: 400G + choices: [ 800G, 400G, 100G, 25G, User-defined ] + roce_v2: + description: + - DSCP for RDMA traffic. Numeric (0-63) with ranges/comma, or named values. + type: str + default: "26" + cnp: + description: + - DSCP value for Congestion Notification. Numeric (0-63) with ranges/comma, or named values. + type: str + default: "48" + wred_min: + description: + - WRED minimum threshold (in kbytes). + type: int + default: 950 + wred_max: + description: + - WRED maximum threshold (in kbytes). + type: int + default: 3000 + wred_drop_probability: + description: + - WRED drop probability percentage. 
+ type: int + default: 7 + wred_weight: + description: + - Influences how quickly WRED reacts to queue depth changes. + type: int + default: 0 + bandwidth_remaining: + description: + - Percentage of remaining bandwidth allocated to AI traffic queues. + type: int + default: 50 + dlb: + description: + - Enable fabric-level Dynamic Load Balancing (DLB). Inter-Switch-Links will be configured as DLB interfaces. + type: bool + default: false + dlb_mode: + description: + - "Select system-wide DLB mode: flowlet, per-packet (packet spraying), or policy driven mixed mode. + Mixed mode is supported on Silicon One (S1) platform only." + type: str + default: flowlet + choices: [ flowlet, per-packet, policy-driven-flowlet, policy-driven-per-packet, policy-driven-mixed-mode ] + dlb_mixed_mode_default: + description: + - Default load balancing mode for policy driven mixed mode DLB. + type: str + default: ecmp + choices: [ ecmp, flowlet, per-packet ] + flowlet_aging: + description: + - "Flowlet aging timer in microseconds. Valid range depends on platform: Cloud Scale (CS)=1-2000000, + Silicon One (S1)=1-1024." + type: int + default: 1 + flowlet_dscp: + description: + - DSCP values for flowlet load balancing. Numeric (0-63) with ranges/comma, or named values. + type: str + default: "" + per_packet_dscp: + description: + - DSCP values for per-packet load balancing. Numeric (0-63) with ranges/comma, or named values. + type: str + default: "" + ai_load_sharing: + description: + - Enable IP load sharing using source and destination address for AI workloads. + type: bool + default: false + priority_flow_control_watch_interval: + description: + - PFC watch interval in milliseconds (101-1000). Leave blank for system default (100ms). + type: int + default: 101 + ptp: + description: + - Enable Precision Time Protocol (PTP). + type: bool + default: false + ptp_loopback_id: + description: + - The PTP loopback ID. 
+ type: int + default: 0 + ptp_domain_id: + description: + - The PTP domain ID for multiple independent PTP clocking subdomains on a single network. + type: int + default: 0 + ptp_vlan_id: + description: + - Precision Time Protocol (PTP) source VLAN ID. SVI used for PTP source on ToRs. + type: int + default: 2 + stp_root_option: + description: + - "Which protocol to use for configuring root bridge: rpvst+ (Rapid Per-VLAN Spanning Tree), + mst (Multiple Spanning Tree), or unmanaged (STP Root not managed by ND)." + type: str + default: unmanaged + choices: [ rpvst+, mst, unmanaged ] + stp_vlan_range: + description: + - The STP VLAN range (minimum 1, maximum 4094). + type: str + default: "1-3967" + mst_instance_range: + description: + - The MST instance range (minimum 0, maximum 4094). + type: str + default: "0" + stp_bridge_priority: + description: + - The STP bridge priority. + type: int + default: 0 + mpls_handoff: + description: + - Enable MPLS handoff. + type: bool + default: false + mpls_loopback_identifier: + description: + - The MPLS loopback identifier used for VXLAN to MPLS SR/LDP Handoff. + type: int + default: 101 + mpls_isis_area_number: + description: + - IS-IS area number for DCI MPLS link. Used only if routing protocol on DCI MPLS link is IS-IS. + type: str + default: "0001" + mpls_loopback_ip_range: + description: + - The MPLS loopback IP address pool. + type: str + default: "10.101.0.0/25" + private_vlan: + description: + - Enable PVLAN on switches except spines and super spines. + type: bool + default: false + default_private_vlan_secondary_network_template: + description: + - Default PVLAN secondary network template. + type: str + default: Pvlan_Secondary_Network + nve_hold_down_timer: + description: + - The NVE hold-down timer in seconds. + type: int + default: 180 + next_generation_oam: + description: + - Enable the Next Generation (NG) OAM feature for all switches in the fabric. 
+ type: bool + default: true + ngoam_south_bound_loop_detect: + description: + - Enable the Next Generation (NG) OAM southbound loop detection. + type: bool + default: false + ngoam_south_bound_loop_detect_probe_interval: + description: + - Set NG OAM southbound loop detection probe interval in seconds. + type: int + default: 300 + ngoam_south_bound_loop_detect_recovery_interval: + description: + - Set NG OAM southbound loop detection recovery interval in seconds. + type: int + default: 600 + strict_config_compliance_mode: + description: + - Enable bi-directional compliance checks to flag additional configs in the running config + that are not in the intent/expected config. + type: bool + default: false + advanced_ssh_option: + description: + - Enable AAA IP Authorization. Enable only when IP Authorization is enabled in the AAA Server. + type: bool + default: false + copp_policy: + description: + - The fabric wide CoPP policy. Customized CoPP policy should be provided when C(manual) is selected. + type: str + default: strict + choices: [ dense, lenient, moderate, strict, manual ] + power_redundancy_mode: + description: + - Default power supply mode for NX-OS switches. + type: str + default: redundant + choices: [ redundant, combined, inputSrcRedundant ] + host_interface_admin_state: + description: + - Enable host interface admin state. + type: bool + default: true + heartbeat_interval: + description: + - The heartbeat interval. + type: int + default: 190 + policy_based_routing: + description: + - Enable policy-based routing. + type: bool + default: false + brownfield_network_name_format: + description: + - The brownfield network name format. + type: str + default: "Auto_Net_VNI$$VNI$$_VLAN$$VLAN_ID$$" + brownfield_skip_overlay_network_attachments: + description: + - Skip brownfield overlay network attachments. + type: bool + default: false + + # Freeform + extra_config_leaf: + description: + - Extra freeform configuration applied to leaf switches. 
+ type: str + default: "" + extra_config_spine: + description: + - Extra freeform configuration applied to spine switches. + type: str + default: "" + extra_config_tor: + description: + - Extra freeform configuration applied to TOR switches. + type: str + default: "" + extra_config_intra_fabric_links: + description: + - Extra freeform configuration applied to intra-fabric links. + type: str + default: "" + pre_interface_config_leaf: + description: + - Additional CLIs added before interface configurations for all switches with a VTEP + unless they have some spine role. + type: str + default: "" + pre_interface_config_spine: + description: + - Additional CLIs added before interface configurations for all switches with some spine role. + type: str + default: "" + pre_interface_config_tor: + description: + - Additional CLIs added before interface configurations for all ToRs. + type: str + default: "" + + # Resources + static_underlay_ip_allocation: + description: + - Enable static underlay IP allocation. + type: bool + default: false + bgp_loopback_ip_range: + description: + - The BGP loopback IP address pool. + type: str + default: "10.2.0.0/22" + nve_loopback_ip_range: + description: + - The NVE loopback IP address pool. + type: str + default: "10.3.0.0/22" + bgp_loopback_ipv6_range: + description: + - The BGP loopback IPv6 address pool. + type: str + default: "fd00::a02:0/119" + nve_loopback_ipv6_range: + description: + - The NVE loopback IPv6 address pool. + type: str + default: "fd00::a03:0/118" + intra_fabric_subnet_range: + description: + - The intra-fabric subnet IP address pool. + type: str + default: "10.4.0.0/16" + ipv6_subnet_range: + description: + - The IPv6 subnet range. + type: str + default: "fd00::a04:0/112" + router_id_range: + description: + - The BGP router ID range in IPv4 subnet format. Used for IPv6 underlay. + type: str + default: "10.2.0.0/23" + l2_vni_range: + description: + - The Layer 2 VNI range. 
+ type: str + default: "30000-49000" + l3_vni_range: + description: + - The Layer 3 VNI range. + type: str + default: "50000-59000" + network_vlan_range: + description: + - The network VLAN range. + type: str + default: "2300-2999" + vrf_vlan_range: + description: + - The VRF VLAN range. + type: str + default: "2000-2299" + sub_interface_dot1q_range: + description: + - The sub-interface 802.1q range (minimum 2, maximum 4093). + type: str + default: "2-511" + vrf_lite_auto_config: + description: + - "VRF Lite Inter-Fabric Connection deployment options. If C(back2BackAndToExternal) is selected, + VRF Lite IFCs are auto created between border devices of two Easy Fabrics, and between + border devices in Easy Fabric and edge routers in External Fabric." + type: str + default: manual + choices: [ manual, back2BackAndToExternal ] + vrf_lite_subnet_range: + description: + - The VRF lite subnet IP address pool. + type: str + default: "10.33.0.0/16" + vrf_lite_subnet_target_mask: + description: + - The VRF lite subnet target mask. + type: int + default: 30 + auto_unique_vrf_lite_ip_prefix: + description: + - Enable auto unique VRF lite IP prefix. + type: bool + default: false + auto_symmetric_vrf_lite: + description: + - Enable auto symmetric VRF lite. + type: bool + default: false + auto_vrf_lite_default_vrf: + description: + - Enable auto VRF lite for the default VRF. + type: bool + default: false + auto_symmetric_default_vrf: + description: + - Enable auto symmetric default VRF. + type: bool + default: false + default_vrf_redistribution_bgp_route_map: + description: + - Route Map used to redistribute BGP routes to IGP in default VRF in auto created VRF Lite IFC links. + type: str + default: extcon-rmap-filter + per_vrf_loopback_auto_provision: + description: + - Enable per-VRF loopback auto-provisioning. + type: bool + default: false + per_vrf_loopback_ip_range: + description: + - The per-VRF loopback IP address pool. 
+ type: str + default: "10.5.0.0/22" + per_vrf_loopback_auto_provision_ipv6: + description: + - Enable per-VRF loopback auto-provisioning for IPv6. + type: bool + default: false + per_vrf_loopback_ipv6_range: + description: + - The per-VRF loopback IPv6 address pool. + type: str + default: "fd00::a05:0/112" + per_vrf_unique_loopback_auto_provision: + description: + - Auto provision a unique IPv4 loopback on a VTEP on VRF attachment. + - This option and per VRF per VTEP loopback auto-provisioning are mutually exclusive. + type: bool + default: false + per_vrf_unique_loopback_ip_range: + description: + - Prefix pool to assign unique IPv4 addresses to loopbacks on VTEPs on a per VRF basis. + type: str + default: "10.6.0.0/22" + per_vrf_unique_loopback_auto_provision_v6: + description: + - Auto provision a unique IPv6 loopback on a VTEP on VRF attachment. + type: bool + default: false + per_vrf_unique_loopback_ipv6_range: + description: + - Prefix pool to assign unique IPv6 addresses to loopbacks on VTEPs on a per VRF basis. + type: str + default: "fd00::a06:0/112" + ip_service_level_agreement_id_range: + description: + - The IP SLA ID range. + type: str + default: "10000-19999" + object_tracking_number_range: + description: + - The object tracking number range. + type: str + default: "100-299" + route_map_sequence_number_range: + description: + - The route map sequence number range (minimum 1, maximum 65534). + type: str + default: "1-65534" + service_network_vlan_range: + description: + - Per Switch Overlay Service Network VLAN Range (minimum 2, maximum 4094). + type: str + default: "3000-3199" + + # Manageability + inband_management: + description: + - Manage switches with only inband connectivity. + type: bool + default: false + aaa: + description: + - Enable AAA. + type: bool + default: false + extra_config_aaa: + description: + - Extra freeform AAA configuration. 
+ type: str + default: "" + banner: + description: + - The fabric banner text displayed on switch login. + type: str + default: "" + ntp_server_collection: + description: + - The list of NTP server IP addresses. + type: list + elements: str + ntp_server_vrf_collection: + description: + - The list of VRFs for NTP servers. + type: list + elements: str + dns_collection: + description: + - The list of DNS server IP addresses. + type: list + elements: str + dns_vrf_collection: + description: + - The list of VRFs for DNS servers. + type: list + elements: str + syslog_server_collection: + description: + - The list of syslog server IP addresses. + type: list + elements: str + syslog_server_vrf_collection: + description: + - The list of VRFs for syslog servers. + type: list + elements: str + syslog_severity_collection: + description: + - The list of syslog severity levels (0-7). + type: list + elements: int + + # Hypershield + allow_smart_switch_onboarding: + description: + - Enable onboarding of smart switches to Hypershield for firewall service. + type: bool + default: false + connectivity_domain_name: + description: + - Domain name to connect to Hypershield. + type: str + hypershield_connectivity_proxy_server: + description: + - IPv4 address, IPv6 address, or DNS name of the proxy server for Hypershield communication. + type: str + hypershield_connectivity_proxy_server_port: + description: + - Proxy port number for communication with Hypershield. + type: int + hypershield_connectivity_source_intf: + description: + - Loopback interface on smart switch for communication with Hypershield. + type: str + + # Bootstrap + day0_bootstrap: + description: + - Enable day-0 bootstrap (POAP). + type: bool + default: false + local_dhcp_server: + description: + - Enable local DHCP server for bootstrap. + type: bool + default: false + dhcp_protocol_version: + description: + - The IP protocol version for local DHCP server. 
+ type: str + default: dhcpv4 + choices: [ dhcpv4, dhcpv6 ] + dhcp_start_address: + description: + - The DHCP start address for bootstrap. + type: str + default: "" + dhcp_end_address: + description: + - The DHCP end address for bootstrap. + type: str + default: "" + management_gateway: + description: + - The management gateway for bootstrap. + type: str + default: "" + management_ipv4_prefix: + description: + - The management IPv4 prefix length for bootstrap. + type: int + default: 24 + management_ipv6_prefix: + description: + - The management IPv6 prefix length for bootstrap. + type: int + default: 64 + bootstrap_subnet_collection: + description: + - List of IPv4 or IPv6 subnets to be used for bootstrap. + - When O(state=merged), omitting this option preserves the existing collection. + - When O(state=merged), providing this option replaces the entire collection with the supplied list. + - Under O(state=merged), entries in this list are not merged item-by-item. + - Under O(state=merged), removing one entry from the playbook removes it from the fabric, and setting an empty list clears the collection. + - When O(state=replaced), this option is also treated as the exact desired collection. + - When O(state=replaced), omitting this option resets the collection to its default empty value. + type: list + elements: dict + suboptions: + start_ip: + description: + - Starting IP address of the bootstrap range. + type: str + required: true + end_ip: + description: + - Ending IP address of the bootstrap range. + type: str + required: true + default_gateway: + description: + - Default gateway for bootstrap subnet. + type: str + required: true + subnet_prefix: + description: + - Subnet prefix length (8-30). + type: int + required: true + seed_switch_core_interfaces: + description: + - Seed switch fabric interfaces. Core-facing interface list on seed switch. + type: list + elements: str + spine_switch_core_interfaces: + description: + - Spine switch fabric interfaces. 
Core-facing interface list on all spines. + type: list + elements: str + inband_dhcp_servers: + description: + - List of external DHCP server IP addresses (Max 3). + type: list + elements: str + extra_config_nxos_bootstrap: + description: + - Additional CLIs required during device bootup/login (e.g. AAA/Radius). + type: str + default: "" + unnumbered_bootstrap_loopback_id: + description: + - Bootstrap Seed Switch Loopback Interface ID. + type: int + default: 253 + unnumbered_dhcp_start_address: + description: + - Switch Loopback DHCP Scope Start Address. Must be a subset of IGP/BGP Loopback Prefix Pool. + type: str + default: "" + unnumbered_dhcp_end_address: + description: + - Switch Loopback DHCP Scope End Address. Must be a subset of IGP/BGP Loopback Prefix Pool. + type: str + default: "" + + # Configuration Backup + real_time_backup: + description: + - Enable real-time backup. + type: bool + default: false + scheduled_backup: + description: + - Enable scheduled backup. + type: bool + default: false + scheduled_backup_time: + description: + - The scheduled backup time. + type: str + default: "" + + # Flow Monitor + netflow_settings: + description: + - Settings associated with netflow. + type: dict + suboptions: + netflow: + description: + - Enable netflow collection. + type: bool + default: false + netflow_exporter_collection: + description: + - List of netflow exporters. + type: list + elements: dict + suboptions: + exporter_name: + description: + - Name of the netflow exporter. + type: str + required: true + exporter_ip: + description: + - IP address of the netflow collector. + type: str + required: true + vrf: + description: + - VRF name for the exporter. + type: str + default: management + source_interface_name: + description: + - Source interface name. + type: str + required: true + udp_port: + description: + - UDP port for netflow export (1-65535). + type: int + netflow_record_collection: + description: + - List of netflow records. 
+ type: list + elements: dict + suboptions: + record_name: + description: + - Name of the netflow record. + type: str + required: true + record_template: + description: + - Template type for the record. + type: str + required: true + layer2_record: + description: + - Enable layer 2 record fields. + type: bool + default: false + netflow_monitor_collection: + description: + - List of netflow monitors. + type: list + elements: dict + suboptions: + monitor_name: + description: + - Name of the netflow monitor. + type: str + required: true + record_name: + description: + - Associated record name. + type: str + required: true + exporter1_name: + description: + - Primary exporter name. + type: str + required: true + exporter2_name: + description: + - Secondary exporter name. + type: str + default: "" + state: + description: + - The desired state of the fabric resources on the Cisco Nexus Dashboard. + - Use O(state=merged) to create new fabrics and update existing ones as defined in the configuration. + Resources on ND that are not specified in the configuration will be left unchanged. + - Use O(state=replaced) to replace the fabric configuration specified in the configuration. + Any settings not explicitly provided will revert to their defaults. + - Use O(state=overridden) to enforce the configuration as the single source of truth. + Any fabric existing on ND but not present in the configuration will be deleted. Use with extra caution. + - Use O(state=deleted) to remove the fabrics specified in the configuration from the Cisco Nexus Dashboard. + type: str + default: merged + choices: [ merged, replaced, overridden, deleted ] +extends_documentation_fragment: +- cisco.nd.modules +- cisco.nd.check_mode +notes: +- This module is only supported on Nexus Dashboard having version 4.1.0 or higher. +- Only iBGP VXLAN fabric type (C(vxlanIbgp)) is supported by this module. 
+- When using O(state=replaced) with only required fields, all optional management settings revert to their defaults. +- The O(config.management.bgp_asn) field is required when creating a fabric. +- O(config.management.site_id) defaults to the value of O(config.management.bgp_asn) if not provided. +""" + +EXAMPLES = r""" +- name: Create an iBGP VXLAN fabric using state merged + cisco.nd.nd_manage_fabric_ibgp: + state: merged + config: + - fabric_name: my_fabric + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65001" + site_id: "65001" + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00aa" + performance_monitoring: false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: loopback + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: true + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: enable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 
10 + bgp_loopback_ip_range: "10.2.0.0/22" + nve_loopback_ip_range: "10.3.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.254.0/24" + intra_fabric_subnet_range: "10.4.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.33.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.5.0.0/22" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: result + +- name: Update specific fields on an existing fabric using state merged (partial update) + cisco.nd.nd_manage_fabric_ibgp: + state: merged + config: + - fabric_name: my_fabric + category: fabric + management: + bgp_asn: "65002" + site_id: "65002" + anycast_gateway_mac: "2020.0000.00bb" + performance_monitoring: true + register: result + +- name: Create or fully replace an iBGP VXLAN fabric using state replaced + cisco.nd.nd_manage_fabric_ibgp: + state: replaced + config: + - fabric_name: my_fabric + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65004" + site_id: "65004" + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00dd" + performance_monitoring: true + replication_mode: multicast + multicast_group_subnet: "239.1.3.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 3 + rendezvous_point_loopback_id: 253 + vpc_peer_link_vlan: "3700" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: 
loopback + vpc_auto_recovery_timer: 300 + vpc_delay_restore_timer: 120 + vpc_peer_link_port_channel_id: "600" + vpc_ipv6_neighbor_discovery_sync: false + advertise_physical_ip: true + vpc_domain_id_range: "1-800" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9000 + l2_host_interface_mtu: 9000 + tenant_dhcp: false + nxapi: false + nxapi_https_port: 443 + nxapi_http: true + nxapi_http_port: 80 + snmp_trap: false + anycast_border_gateway_advertise_physical_ip: true + greenfield_debug_flag: disable + tcam_allocation: false + real_time_interface_statistics_collection: true + interface_statistics_load_interval: 30 + bgp_loopback_ip_range: "10.22.0.0/22" + nve_loopback_ip_range: "10.23.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.252.0/24" + intra_fabric_subnet_range: "10.24.0.0/16" + l2_vni_range: "40000-59000" + l3_vni_range: "60000-69000" + network_vlan_range: "2400-3099" + vrf_vlan_range: "2100-2399" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.53.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.25.0.0/22" + per_vrf_loopback_auto_provision_ipv6: true + per_vrf_loopback_ipv6_range: "fd00::a25:0/112" + banner: "^ Managed by Ansible ^" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + management_ipv6_prefix: 64 + register: result + +- name: Replace fabric with only required fields (all optional settings revert to defaults) + cisco.nd.nd_manage_fabric_ibgp: + state: replaced + config: + - fabric_name: my_fabric + category: 
fabric + management: + type: vxlanIbgp + bgp_asn: "65004" + site_id: "65004" + banner: "^ Managed by Ansible ^" + register: result + +- name: Enforce exact fabric inventory using state overridden (deletes unlisted fabrics) + cisco.nd.nd_manage_fabric_ibgp: + state: overridden + config: + - fabric_name: fabric_east + category: fabric + location: + latitude: 40.7128 + longitude: -74.0060 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65010" + site_id: "65010" + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0010" + replication_mode: multicast + multicast_group_subnet: "239.1.10.0/25" + bgp_loopback_ip_range: "10.10.0.0/22" + nve_loopback_ip_range: "10.11.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.10.0/24" + intra_fabric_subnet_range: "10.12.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + - fabric_name: fabric_west + category: fabric + location: + latitude: 34.0522 + longitude: -118.2437 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65020" + site_id: "65020" + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0020" + replication_mode: multicast + multicast_group_subnet: "239.1.20.0/25" + bgp_loopback_ip_range: "10.20.0.0/22" + nve_loopback_ip_range: "10.21.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.20.0/24" + intra_fabric_subnet_range: "10.22.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + register: result + +- name: Delete a specific fabric using state deleted + cisco.nd.nd_manage_fabric_ibgp: + state: deleted + config: + - fabric_name: my_fabric + register: result + +- name: Delete multiple fabrics in a single task + cisco.nd.nd_manage_fabric_ibgp: + state: 
deleted + config: + - fabric_name: fabric_east + - fabric_name: fabric_west + - fabric_name: fabric_old + register: result +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.nd import nd_argument_spec +from ansible_collections.cisco.nd.plugins.module_utils.nd_state_machine import NDStateMachine +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_fabric.manage_fabric_ibgp import FabricIbgpModel +from ansible_collections.cisco.nd.plugins.module_utils.orchestrators.manage_fabric_ibgp import ManageIbgpFabricOrchestrator +from ansible_collections.cisco.nd.plugins.module_utils.common.exceptions import NDStateMachineError + + +def main(): + argument_spec = nd_argument_spec() + argument_spec.update(FabricIbgpModel.get_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + # Initialize StateMachine + nd_state_machine = NDStateMachine( + module=module, + model_orchestrator=ManageIbgpFabricOrchestrator, + ) + + # Manage state + nd_state_machine.manage_state() + + module.exit_json(**nd_state_machine.output.format()) + + except NDStateMachineError as e: + module.fail_json(msg=str(e)) + except Exception as e: + module.fail_json(msg=f"Module execution failed: {str(e)}") + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/nd_manage_fabric/tasks/fabric_ebgp.yaml b/tests/integration/targets/nd_manage_fabric/tasks/fabric_ebgp.yaml new file mode 100644 index 00000000..80671b5d --- /dev/null +++ b/tests/integration/targets/nd_manage_fabric/tasks/fabric_ebgp.yaml @@ -0,0 +1,1228 @@ +--- +# Test code for the ND modules +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Test that we have a Nexus Dashboard host, username and password + ansible.builtin.fail: + msg: 'Please define the following 
variables: ansible_host, ansible_user and ansible_password.' + when: ansible_host is not defined or ansible_user is not defined or ansible_password is not defined + +- name: Set vars + ansible.builtin.set_fact: + nd_info: &nd_info + output_level: '{{ api_key_output_level | default("debug") }}' + +############################################################################# +# CLEANUP - Ensure clean state before tests +############################################################################# +- name: Clean up any existing test fabrics before starting tests + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ebgp_test_fabric_merged }}" + - fabric_name: "{{ ebgp_test_fabric_replaced }}" + - fabric_name: "{{ ebgp_test_fabric_deleted }}" + tags: always + +############################################################################# +# TEST 1: STATE MERGED - Create fabric using merged state +############################################################################# +- name: "TEST 1a: Create eBGP fabric using state merged (first run)" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: merged + config: + - "{{ {'fabric_name': ebgp_test_fabric_merged} | combine(fabric_config_ebgp) }}" + register: ebgp_merged_result_1 + tags: [test_merged, test_merged_create] + +- name: "TEST 1a: Verify eBGP fabric was created using merged state" + assert: + that: + - ebgp_merged_result_1 is changed + - ebgp_merged_result_1 is not failed + fail_msg: "eBGP fabric creation with state merged failed" + success_msg: "eBGP fabric successfully created with state merged" + tags: [test_merged, test_merged_create] + +- name: "TEST 1b: Create eBGP fabric using state merged (second run - idempotency test)" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: merged + config: + - "{{ {'fabric_name': ebgp_test_fabric_merged} | combine(fabric_config_ebgp) }}" + register: ebgp_merged_result_2 + tags: [test_merged, test_merged_idempotent] + +- 
name: "TEST 1b: Verify merged state is idempotent" + assert: + that: + - ebgp_merged_result_2 is not changed + - ebgp_merged_result_2 is not failed + fail_msg: "Merged state is not idempotent - should not change when run twice with same config" + success_msg: "Merged state is idempotent - no changes on second run" + tags: [test_merged, test_merged_idempotent] + +- name: "TEST 1c: Update eBGP fabric using state merged (modify existing)" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: merged + config: + - fabric_name: "{{ ebgp_test_fabric_merged }}" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn: "65002" # Changed from 65001 + bgp_asn_auto_allocation: false + site_id: "65002" # Changed from 65001 + bgp_as_mode: multiAS + bgp_allow_as_in_num: 1 + bgp_max_path: 4 + auto_configure_ebgp_evpn_peering: true + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00bb" # Changed from 00aa + performance_monitoring: true # Changed from false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: management + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 
9216 + tenant_dhcp: true + nxapi: false + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: disable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + bgp_loopback_ip_range: "10.2.0.0/22" + nve_loopback_ip_range: "10.3.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.254.0/24" + intra_fabric_subnet_range: "10.4.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.33.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.5.0.0/22" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: ebgp_merged_result_3 + tags: [test_merged, test_merged_update] + +- name: "TEST 1c: Verify eBGP fabric was updated using merged state" + assert: + that: + - ebgp_merged_result_3 is changed + - ebgp_merged_result_3 is not failed + fail_msg: "eBGP fabric update with state merged failed" + success_msg: "eBGP fabric successfully updated with state merged" + tags: [test_merged, test_merged_update] + +############################################################################# +# VALIDATION: Query ebgp_test_fabric_merged and validate expected changes +############################################################################# +- name: "VALIDATION 1: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: 
"{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: true + status_code: + - 200 + register: nd_auth_response + tags: [test_merged, test_merged_validation] + delegate_to: localhost + +- name: "VALIDATION 1: Query ebgp_test_fabric_merged configuration from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ ebgp_test_fabric_merged }}" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: ebgp_merged_fabric_query + tags: [test_merged, test_merged_validation] + delegate_to: localhost + +- name: "VALIDATION 1: Parse eBGP fabric configuration response" + set_fact: + ebgp_merged_fabric_config: "{{ ebgp_merged_fabric_query.json }}" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify BGP ASN was updated to 65002" + assert: + that: + - ebgp_merged_fabric_config.management.bgpAsn == "65002" + fail_msg: "BGP ASN validation failed. Expected: 65002, Actual: {{ ebgp_merged_fabric_config.management.bgpAsn }}" + success_msg: "✓ BGP ASN correctly updated to 65002" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify Site ID was updated to 65002" + assert: + that: + - ebgp_merged_fabric_config.management.siteId == "65002" + fail_msg: "Site ID validation failed. Expected: 65002, Actual: {{ ebgp_merged_fabric_config.management.siteId }}" + success_msg: "✓ Site ID correctly updated to 65002" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify Anycast Gateway MAC was updated to 2020.0000.00bb" + assert: + that: + - ebgp_merged_fabric_config.management.anycastGatewayMac == "2020.0000.00bb" + fail_msg: "Anycast Gateway MAC validation failed. 
Expected: 2020.0000.00bb, Actual: {{ ebgp_merged_fabric_config.management.anycastGatewayMac }}" + success_msg: "✓ Anycast Gateway MAC correctly updated to 2020.0000.00bb" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify Performance Monitoring was enabled" + assert: + that: + - ebgp_merged_fabric_config.management.performanceMonitoring == true + fail_msg: "Performance Monitoring validation failed. Expected: true, Actual: {{ ebgp_merged_fabric_config.management.performanceMonitoring }}" + success_msg: "✓ Performance Monitoring correctly enabled" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify BGP AS Mode is multiAS" + assert: + that: + - ebgp_merged_fabric_config.management.bgpAsMode == "multiAS" + fail_msg: "BGP AS Mode validation failed. Expected: multiAS, Actual: {{ ebgp_merged_fabric_config.management.bgpAsMode }}" + success_msg: "✓ BGP AS Mode correctly set to multiAS" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Display successful validation summary for ebgp_test_fabric_merged" + debug: + msg: | + ======================================== + VALIDATION SUMMARY for ebgp_test_fabric_merged: + ======================================== + ✓ BGP ASN: {{ ebgp_merged_fabric_config.management.bgpAsn }} + ✓ Site ID: {{ ebgp_merged_fabric_config.management.siteId }} + ✓ Anycast Gateway MAC: {{ ebgp_merged_fabric_config.management.anycastGatewayMac }} + ✓ Performance Monitoring: {{ ebgp_merged_fabric_config.management.performanceMonitoring }} + ✓ BGP AS Mode: {{ ebgp_merged_fabric_config.management.bgpAsMode }} + + All 5 expected changes validated successfully! 
+      ========================================
+  tags: [test_merged, test_merged_validation]
+
+#############################################################################
+# TEST 2: STATE REPLACED - Create and manage fabric using replaced state
+#############################################################################
+- name: "TEST 2a: Create eBGP fabric using state replaced (first run)"
+  cisco.nd.nd_manage_fabric_ebgp:
+    <<: *nd_info
+    state: replaced
+    config:
+      - fabric_name: "{{ ebgp_test_fabric_replaced }}"
+        category: fabric
+        location:
+          latitude: 37.7749
+          longitude: -122.4194
+        license_tier: premier
+        alert_suspend: disabled
+        security_domain: all
+        telemetry_collection: false
+        management:
+          type: vxlanEbgp
+          bgp_asn: "65004" # Different from default ASN
+          bgp_asn_auto_allocation: true
+          bgp_asn_range: "65000-65100"
+          site_id: "65004" # Different from default site_id
+          bgp_as_mode: multiAS # Set explicitly; multiAS is the expected default — confirm against module defaults
+          bgp_allow_as_in_num: 2 # Different from default 1
+          bgp_max_path: 8 # Different from default 4
+          auto_configure_ebgp_evpn_peering: true
+          target_subnet_mask: 30
+          anycast_gateway_mac: "2020.0000.00dd" # Different from default MAC
+          performance_monitoring: true # Different from default false
+          replication_mode: multicast
+          multicast_group_subnet: "239.1.3.0/25" # Different from default subnet
+          auto_generate_multicast_group_address: false
+          underlay_multicast_group_address_limit: 128
+          tenant_routed_multicast: false
+          rendezvous_point_count: 4 # Different from default 2
+          rendezvous_point_loopback_id: 253 # Different from default 254
+          vpc_peer_link_vlan: "3700" # Different from default 3600
+          vpc_peer_link_enable_native_vlan: false
+          vpc_peer_keep_alive_option: management
+          vpc_auto_recovery_timer: 300 # Different from default 360
+          vpc_delay_restore_timer: 120 # Different from default 150
+          vpc_peer_link_port_channel_id: "600" # Different from default 500
+          vpc_ipv6_neighbor_discovery_sync: false # Different from default true
+          
advertise_physical_ip: true # Different from default false + vpc_domain_id_range: "1-800" # Different from default 1-1000 + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9000 # Different from default 9216 + l2_host_interface_mtu: 9000 # Different from default 9216 + tenant_dhcp: false # Different from default true + nxapi: false + nxapi_https_port: 443 + nxapi_http: true # Different from default false + nxapi_http_port: 80 + snmp_trap: false # Different from default true + anycast_border_gateway_advertise_physical_ip: true # Different from default false + greenfield_debug_flag: enable # Different from default disable + tcam_allocation: false # Different from default true + real_time_interface_statistics_collection: true # Different from default false + interface_statistics_load_interval: 30 # Different from default 10 + bgp_loopback_ip_range: "10.22.0.0/22" # Different from default range + nve_loopback_ip_range: "10.23.0.0/22" # Different from default range + anycast_rendezvous_point_ip_range: "10.254.252.0/24" # Different from default range + intra_fabric_subnet_range: "10.24.0.0/16" # Different from default range + l2_vni_range: "40000-59000" # Different from default range + l3_vni_range: "60000-69000" # Different from default range + network_vlan_range: "2400-3099" # Different from default range + vrf_vlan_range: "2100-2399" # Different from default range + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.53.0.0/16" # Different from default range + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.25.0.0/22" # Different from default range + 
per_vrf_loopback_auto_provision_ipv6: true
+          per_vrf_loopback_ipv6_range: "fd00::a25:0/112" # Different from default range
+          banner: "^ Updated via replaced state ^"
+          day0_bootstrap: false
+          local_dhcp_server: false
+          dhcp_protocol_version: dhcpv4
+          dhcp_start_address: ""
+          dhcp_end_address: ""
+          management_gateway: ""
+          management_ipv4_prefix: 24
+          management_ipv6_prefix: 64
+  register: ebgp_replaced_result_1
+  tags: [test_replaced, test_replaced_create]
+
+- name: "TEST 2a: Verify eBGP fabric was created using replaced state"
+  assert:
+    that:
+      - ebgp_replaced_result_1 is changed
+      - ebgp_replaced_result_1 is not failed
+    fail_msg: "eBGP fabric creation with state replaced failed"
+    success_msg: "eBGP fabric successfully created with state replaced"
+  tags: [test_replaced, test_replaced_create]
+
+- name: "TEST 2b: Create eBGP fabric using state replaced (second run - idempotency test)"
+  cisco.nd.nd_manage_fabric_ebgp:
+    <<: *nd_info
+    state: replaced
+    config:
+      - fabric_name: "{{ ebgp_test_fabric_replaced }}"
+        category: fabric
+        location:
+          latitude: 37.7749
+          longitude: -122.4194
+        license_tier: premier
+        alert_suspend: disabled
+        security_domain: all
+        telemetry_collection: false
+        management:
+          type: vxlanEbgp
+          bgp_asn: "65004" # Different from default ASN
+          bgp_asn_auto_allocation: true
+          bgp_asn_range: "65000-65100"
+          site_id: "65004"
+          bgp_as_mode: multiAS # Same as the eBGP default (multiAS, confirmed by VALIDATION 2); kept explicit
+          bgp_allow_as_in_num: 2
+          bgp_max_path: 8
+          auto_configure_ebgp_evpn_peering: true
+          target_subnet_mask: 30
+          anycast_gateway_mac: "2020.0000.00dd"
+          performance_monitoring: true
+          replication_mode: multicast
+          multicast_group_subnet: "239.1.3.0/25"
+          auto_generate_multicast_group_address: false
+          underlay_multicast_group_address_limit: 128
+          tenant_routed_multicast: false
+          rendezvous_point_count: 4
+          rendezvous_point_loopback_id: 253
+          vpc_peer_link_vlan: "3700"
+          vpc_peer_link_enable_native_vlan: false
+          vpc_peer_keep_alive_option: management
+
vpc_auto_recovery_timer: 300 + vpc_delay_restore_timer: 120 + vpc_peer_link_port_channel_id: "600" + vpc_ipv6_neighbor_discovery_sync: false + advertise_physical_ip: true + vpc_domain_id_range: "1-800" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9000 + l2_host_interface_mtu: 9000 + tenant_dhcp: false + nxapi: false + nxapi_https_port: 443 + nxapi_http: true + nxapi_http_port: 80 + snmp_trap: false + anycast_border_gateway_advertise_physical_ip: true + greenfield_debug_flag: enable + tcam_allocation: false + real_time_interface_statistics_collection: true + interface_statistics_load_interval: 30 + bgp_loopback_ip_range: "10.22.0.0/22" + nve_loopback_ip_range: "10.23.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.252.0/24" + intra_fabric_subnet_range: "10.24.0.0/16" + l2_vni_range: "40000-59000" + l3_vni_range: "60000-69000" + network_vlan_range: "2400-3099" + vrf_vlan_range: "2100-2399" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.53.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.25.0.0/22" + per_vrf_loopback_auto_provision_ipv6: true + per_vrf_loopback_ipv6_range: "fd00::a25:0/112" + banner: "^ Updated via replaced state ^" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + management_ipv6_prefix: 64 + register: ebgp_replaced_result_2 + tags: [test_replaced, test_replaced_idempotent] + +- name: "TEST 2b: Verify replaced state is idempotent" + assert: + that: + - ebgp_replaced_result_2 is not changed + - 
ebgp_replaced_result_2 is not failed + fail_msg: "Replaced state is not idempotent - should not change when run twice with same config" + success_msg: "Replaced state is idempotent - no changes on second run" + tags: [test_replaced, test_replaced_idempotent] + +- name: "TEST 2c: Update eBGP fabric using state replaced (complete replacement with minimal config)" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: replaced + config: + - fabric_name: "{{ ebgp_test_fabric_replaced }}" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn: "65004" # Different from default ASN + bgp_asn_auto_allocation: true + bgp_asn_range: "65000-65100" + site_id: "65004" + banner: "^ Updated via replaced state ^" + register: ebgp_replaced_result_3 + tags: [test_replaced, test_replaced_update] + +- name: "TEST 2c: Verify eBGP fabric was completely replaced (defaults restored)" + assert: + that: + - ebgp_replaced_result_3 is changed + - ebgp_replaced_result_3 is not failed + fail_msg: "eBGP fabric replacement with state replaced failed" + success_msg: "eBGP fabric successfully replaced with state replaced" + tags: [test_replaced, test_replaced_update] + +############################################################################# +# VALIDATION: Query ebgp_test_fabric_replaced and validate defaults are restored +############################################################################# +- name: "VALIDATION 2: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: "{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: 
true + status_code: + - 200 + register: nd_auth_response_2 + tags: [test_replaced, test_replaced_validation] + delegate_to: localhost + +- name: "VALIDATION 2: Query ebgp_test_fabric_replaced configuration from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ ebgp_test_fabric_replaced }}" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response_2.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: ebgp_replaced_fabric_query + tags: [test_replaced, test_replaced_validation] + delegate_to: localhost + +- name: "VALIDATION 2: Parse eBGP fabric configuration response" + set_fact: + ebgp_replaced_fabric_config: "{{ ebgp_replaced_fabric_query.json }}" + tags: [test_replaced, test_replaced_validation] + +# Network Range Validations - verify defaults were restored +- name: "VALIDATION 2: Verify L3 VNI Range was standardized to 50000-59000" + assert: + that: + - ebgp_replaced_fabric_config.management.l3VniRange == "50000-59000" + fail_msg: "L3 VNI Range validation failed. Expected: 50000-59000, Actual: {{ ebgp_replaced_fabric_config.management.l3VniRange }}" + success_msg: "✓ L3 VNI Range correctly standardized to 50000-59000" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify L2 VNI Range was standardized to 30000-49000" + assert: + that: + - ebgp_replaced_fabric_config.management.l2VniRange == "30000-49000" + fail_msg: "L2 VNI Range validation failed. 
Expected: 30000-49000, Actual: {{ ebgp_replaced_fabric_config.management.l2VniRange }}" + success_msg: "✓ L2 VNI Range correctly standardized to 30000-49000" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify BGP Loopback IP Range was standardized to 10.2.0.0/22" + assert: + that: + - ebgp_replaced_fabric_config.management.bgpLoopbackIpRange == "10.2.0.0/22" + fail_msg: "BGP Loopback IP Range validation failed. Expected: 10.2.0.0/22, Actual: {{ ebgp_replaced_fabric_config.management.bgpLoopbackIpRange }}" + success_msg: "✓ BGP Loopback IP Range correctly standardized to 10.2.0.0/22" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify NVE Loopback IP Range was standardized to 10.3.0.0/22" + assert: + that: + - ebgp_replaced_fabric_config.management.nveLoopbackIpRange == "10.3.0.0/22" + fail_msg: "NVE Loopback IP Range validation failed. Expected: 10.3.0.0/22, Actual: {{ ebgp_replaced_fabric_config.management.nveLoopbackIpRange }}" + success_msg: "✓ NVE Loopback IP Range correctly standardized to 10.3.0.0/22" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Intra-Fabric Subnet Range was standardized to 10.4.0.0/16" + assert: + that: + - ebgp_replaced_fabric_config.management.intraFabricSubnetRange == "10.4.0.0/16" + fail_msg: "Intra-Fabric Subnet Range validation failed. Expected: 10.4.0.0/16, Actual: {{ ebgp_replaced_fabric_config.management.intraFabricSubnetRange }}" + success_msg: "✓ Intra-Fabric Subnet Range correctly standardized to 10.4.0.0/16" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify VRF Lite Subnet Range was standardized to 10.33.0.0/16" + assert: + that: + - ebgp_replaced_fabric_config.management.vrfLiteSubnetRange == "10.33.0.0/16" + fail_msg: "VRF Lite Subnet Range validation failed. 
Expected: 10.33.0.0/16, Actual: {{ ebgp_replaced_fabric_config.management.vrfLiteSubnetRange }}" + success_msg: "✓ VRF Lite Subnet Range correctly standardized to 10.33.0.0/16" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Anycast RP IP Range was standardized to 10.254.254.0/24" + assert: + that: + - ebgp_replaced_fabric_config.management.anycastRendezvousPointIpRange == "10.254.254.0/24" + fail_msg: "Anycast RP IP Range validation failed. Expected: 10.254.254.0/24, Actual: {{ ebgp_replaced_fabric_config.management.anycastRendezvousPointIpRange }}" + success_msg: "✓ Anycast RP IP Range correctly standardized to 10.254.254.0/24" + tags: [test_replaced, test_replaced_validation] + +# VLAN Range Validations +- name: "VALIDATION 2: Verify Network VLAN Range was standardized to 2300-2999" + assert: + that: + - ebgp_replaced_fabric_config.management.networkVlanRange == "2300-2999" + fail_msg: "Network VLAN Range validation failed. Expected: 2300-2999, Actual: {{ ebgp_replaced_fabric_config.management.networkVlanRange }}" + success_msg: "✓ Network VLAN Range correctly standardized to 2300-2999" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify VRF VLAN Range was standardized to 2000-2299" + assert: + that: + - ebgp_replaced_fabric_config.management.vrfVlanRange == "2000-2299" + fail_msg: "VRF VLAN Range validation failed. Expected: 2000-2299, Actual: {{ ebgp_replaced_fabric_config.management.vrfVlanRange }}" + success_msg: "✓ VRF VLAN Range correctly standardized to 2000-2299" + tags: [test_replaced, test_replaced_validation] + +# MTU Validations +- name: "VALIDATION 2: Verify Fabric MTU was restored to 9216" + assert: + that: + - ebgp_replaced_fabric_config.management.fabricMtu == 9216 + fail_msg: "Fabric MTU validation failed. 
Expected: 9216, Actual: {{ ebgp_replaced_fabric_config.management.fabricMtu }}" + success_msg: "✓ Fabric MTU correctly restored to 9216" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify L2 Host Interface MTU was restored to 9216" + assert: + that: + - ebgp_replaced_fabric_config.management.l2HostInterfaceMtu == 9216 + fail_msg: "L2 Host Interface MTU validation failed. Expected: 9216, Actual: {{ ebgp_replaced_fabric_config.management.l2HostInterfaceMtu }}" + success_msg: "✓ L2 Host Interface MTU correctly restored to 9216" + tags: [test_replaced, test_replaced_validation] + +# Gateway and Multicast Validations +- name: "VALIDATION 2: Verify Anycast Gateway MAC was standardized to 2020.0000.00aa" + assert: + that: + - ebgp_replaced_fabric_config.management.anycastGatewayMac == "2020.0000.00aa" + fail_msg: "Anycast Gateway MAC validation failed. Expected: 2020.0000.00aa, Actual: {{ ebgp_replaced_fabric_config.management.anycastGatewayMac }}" + success_msg: "✓ Anycast Gateway MAC correctly standardized to 2020.0000.00aa" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Multicast Group Subnet was standardized to 239.1.1.0/25" + assert: + that: + - ebgp_replaced_fabric_config.management.multicastGroupSubnet == "239.1.1.0/25" + fail_msg: "Multicast Group Subnet validation failed. Expected: 239.1.1.0/25, Actual: {{ ebgp_replaced_fabric_config.management.multicastGroupSubnet }}" + success_msg: "✓ Multicast Group Subnet correctly standardized to 239.1.1.0/25" + tags: [test_replaced, test_replaced_validation] + +# VPC Configuration Validations +- name: "VALIDATION 2: Verify VPC Auto Recovery Timer was standardized to 360" + assert: + that: + - ebgp_replaced_fabric_config.management.vpcAutoRecoveryTimer == 360 + fail_msg: "VPC Auto Recovery Timer validation failed. 
Expected: 360, Actual: {{ ebgp_replaced_fabric_config.management.vpcAutoRecoveryTimer }}" + success_msg: "✓ VPC Auto Recovery Timer correctly standardized to 360" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify VPC Delay Restore Timer was standardized to 150" + assert: + that: + - ebgp_replaced_fabric_config.management.vpcDelayRestoreTimer == 150 + fail_msg: "VPC Delay Restore Timer validation failed. Expected: 150, Actual: {{ ebgp_replaced_fabric_config.management.vpcDelayRestoreTimer }}" + success_msg: "✓ VPC Delay Restore Timer correctly standardized to 150" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify VPC Peer Link Port Channel ID was standardized to 500" + assert: + that: + - ebgp_replaced_fabric_config.management.vpcPeerLinkPortChannelId == "500" + fail_msg: "VPC Peer Link Port Channel ID validation failed. Expected: 500, Actual: {{ ebgp_replaced_fabric_config.management.vpcPeerLinkPortChannelId }}" + success_msg: "✓ VPC Peer Link Port Channel ID correctly standardized to 500" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify VPC Peer Link VLAN was standardized to 3600" + assert: + that: + - ebgp_replaced_fabric_config.management.vpcPeerLinkVlan == "3600" + fail_msg: "VPC Peer Link VLAN validation failed. Expected: 3600, Actual: {{ ebgp_replaced_fabric_config.management.vpcPeerLinkVlan }}" + success_msg: "✓ VPC Peer Link VLAN correctly standardized to 3600" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify VPC Domain ID Range was standardized to 1-1000" + assert: + that: + - ebgp_replaced_fabric_config.management.vpcDomainIdRange == "1-1000" + fail_msg: "VPC Domain ID Range validation failed. 
Expected: 1-1000, Actual: {{ ebgp_replaced_fabric_config.management.vpcDomainIdRange }}" + success_msg: "✓ VPC Domain ID Range correctly standardized to 1-1000" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify VPC IPv6 Neighbor Discovery Sync was enabled" + assert: + that: + - ebgp_replaced_fabric_config.management.vpcIpv6NeighborDiscoverySync == true + fail_msg: "VPC IPv6 Neighbor Discovery Sync validation failed. Expected: true, Actual: {{ ebgp_replaced_fabric_config.management.vpcIpv6NeighborDiscoverySync }}" + success_msg: "✓ VPC IPv6 Neighbor Discovery Sync correctly enabled" + tags: [test_replaced, test_replaced_validation] + +# Multicast Settings Validations +- name: "VALIDATION 2: Verify Rendezvous Point Count was standardized to 2" + assert: + that: + - ebgp_replaced_fabric_config.management.rendezvousPointCount == 2 + fail_msg: "Rendezvous Point Count validation failed. Expected: 2, Actual: {{ ebgp_replaced_fabric_config.management.rendezvousPointCount }}" + success_msg: "✓ Rendezvous Point Count correctly standardized to 2" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Rendezvous Point Loopback ID was standardized to 254" + assert: + that: + - ebgp_replaced_fabric_config.management.rendezvousPointLoopbackId == 254 + fail_msg: "Rendezvous Point Loopback ID validation failed. Expected: 254, Actual: {{ ebgp_replaced_fabric_config.management.rendezvousPointLoopbackId }}" + success_msg: "✓ Rendezvous Point Loopback ID correctly standardized to 254" + tags: [test_replaced, test_replaced_validation] + +# eBGP-specific Validations +- name: "VALIDATION 2: Verify BGP AS Mode was standardized to multiAS" + assert: + that: + - ebgp_replaced_fabric_config.management.bgpAsMode == "multiAS" + fail_msg: "BGP AS Mode validation failed. 
Expected: multiAS, Actual: {{ ebgp_replaced_fabric_config.management.bgpAsMode }}" + success_msg: "✓ BGP AS Mode correctly standardized to multiAS" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify BGP Allow AS In Num was standardized to 1" + assert: + that: + - ebgp_replaced_fabric_config.management.bgpAllowAsInNum == 1 + fail_msg: "BGP Allow AS In Num validation failed. Expected: 1, Actual: {{ ebgp_replaced_fabric_config.management.bgpAllowAsInNum }}" + success_msg: "✓ BGP Allow AS In Num correctly standardized to 1" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify BGP Max Path was standardized to 4" + assert: + that: + - ebgp_replaced_fabric_config.management.bgpMaxPath == 4 + fail_msg: "BGP Max Path validation failed. Expected: 4, Actual: {{ ebgp_replaced_fabric_config.management.bgpMaxPath }}" + success_msg: "✓ BGP Max Path correctly standardized to 4" + tags: [test_replaced, test_replaced_validation] + +# Feature Flag Validations +- name: "VALIDATION 2: Verify TCAM Allocation was re-enabled" + assert: + that: + - ebgp_replaced_fabric_config.management.tcamAllocation == true + fail_msg: "TCAM Allocation validation failed. Expected: true, Actual: {{ ebgp_replaced_fabric_config.management.tcamAllocation }}" + success_msg: "✓ TCAM Allocation correctly re-enabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Real Time Interface Statistics Collection was disabled" + assert: + that: + - ebgp_replaced_fabric_config.management.realTimeInterfaceStatisticsCollection == false + fail_msg: "Real Time Interface Statistics Collection validation failed. 
Expected: false, Actual: {{ ebgp_replaced_fabric_config.management.realTimeInterfaceStatisticsCollection }}" + success_msg: "✓ Real Time Interface Statistics Collection correctly disabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Performance Monitoring was disabled" + assert: + that: + - ebgp_replaced_fabric_config.management.performanceMonitoring == false + fail_msg: "Performance Monitoring validation failed. Expected: false, Actual: {{ ebgp_replaced_fabric_config.management.performanceMonitoring }}" + success_msg: "✓ Performance Monitoring correctly disabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Tenant DHCP was re-enabled" + assert: + that: + - ebgp_replaced_fabric_config.management.tenantDhcp == true + fail_msg: "Tenant DHCP validation failed. Expected: true, Actual: {{ ebgp_replaced_fabric_config.management.tenantDhcp }}" + success_msg: "✓ Tenant DHCP correctly re-enabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify SNMP Trap was re-enabled" + assert: + that: + - ebgp_replaced_fabric_config.management.snmpTrap == true + fail_msg: "SNMP Trap validation failed. Expected: true, Actual: {{ ebgp_replaced_fabric_config.management.snmpTrap }}" + success_msg: "✓ SNMP Trap correctly re-enabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Greenfield Debug Flag was set to disable (eBGP default)" + assert: + that: + - ebgp_replaced_fabric_config.management.greenfieldDebugFlag == "disable" + fail_msg: "Greenfield Debug Flag validation failed. 
Expected: disable, Actual: {{ ebgp_replaced_fabric_config.management.greenfieldDebugFlag }}" + success_msg: "✓ Greenfield Debug Flag correctly set to disable (eBGP default)" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify NXAPI HTTP is always true for eBGP (ND enforced behavior)" + assert: + that: + - ebgp_replaced_fabric_config.management.nxapiHttp == true + fail_msg: "NXAPI HTTP validation failed. ND enforces nxapiHttp=true for eBGP fabrics, Actual: {{ ebgp_replaced_fabric_config.management.nxapiHttp }}" + success_msg: "✓ NXAPI HTTP is true (ND enforces this for eBGP fabrics regardless of configured value)" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify NXAPI was disabled" + assert: + that: + - ebgp_replaced_fabric_config.management.nxapi == false + fail_msg: "NXAPI validation failed. Expected: false, Actual: {{ ebgp_replaced_fabric_config.management.nxapi }}" + success_msg: "✓ NXAPI correctly disabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Per VRF Loopback Auto Provision was disabled" + assert: + that: + - ebgp_replaced_fabric_config.management.perVrfLoopbackAutoProvision == false + fail_msg: "Per VRF Loopback Auto Provision validation failed. Expected: false, Actual: {{ ebgp_replaced_fabric_config.management.perVrfLoopbackAutoProvision }}" + success_msg: "✓ Per VRF Loopback Auto Provision correctly disabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Per VRF Loopback Auto Provision IPv6 was disabled" + assert: + that: + - ebgp_replaced_fabric_config.management.perVrfLoopbackAutoProvisionIpv6 == false + fail_msg: "Per VRF Loopback Auto Provision IPv6 validation failed. 
Expected: false, Actual: {{ ebgp_replaced_fabric_config.management.perVrfLoopbackAutoProvisionIpv6 }}" + success_msg: "✓ Per VRF Loopback Auto Provision IPv6 correctly disabled" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Banner was preserved" + assert: + that: + - ebgp_replaced_fabric_config.management.banner == "^ Updated via replaced state ^" + fail_msg: "Banner validation failed. Expected: '^ Updated via replaced state ^', Actual: {{ ebgp_replaced_fabric_config.management.banner }}" + success_msg: "✓ Banner correctly preserved: '{{ ebgp_replaced_fabric_config.management.banner }}'" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Display successful validation summary for ebgp_test_fabric_replaced" + debug: + msg: | + ======================================== + VALIDATION SUMMARY for ebgp_test_fabric_replaced: + ======================================== + Network Ranges (restored to defaults): + ✓ L3 VNI Range: {{ ebgp_replaced_fabric_config.management.l3VniRange }} + ✓ L2 VNI Range: {{ ebgp_replaced_fabric_config.management.l2VniRange }} + ✓ BGP Loopback IP Range: {{ ebgp_replaced_fabric_config.management.bgpLoopbackIpRange }} + ✓ NVE Loopback IP Range: {{ ebgp_replaced_fabric_config.management.nveLoopbackIpRange }} + ✓ Intra-Fabric Subnet Range: {{ ebgp_replaced_fabric_config.management.intraFabricSubnetRange }} + ✓ VRF Lite Subnet Range: {{ ebgp_replaced_fabric_config.management.vrfLiteSubnetRange }} + ✓ Anycast RP IP Range: {{ ebgp_replaced_fabric_config.management.anycastRendezvousPointIpRange }} + + VLAN Ranges: + ✓ Network VLAN Range: {{ ebgp_replaced_fabric_config.management.networkVlanRange }} + ✓ VRF VLAN Range: {{ ebgp_replaced_fabric_config.management.vrfVlanRange }} + + MTU Settings: + ✓ Fabric MTU: {{ ebgp_replaced_fabric_config.management.fabricMtu }} + ✓ L2 Host Interface MTU: {{ ebgp_replaced_fabric_config.management.l2HostInterfaceMtu }} + + VPC Configuration: + ✓ VPC Auto Recovery 
Timer: {{ ebgp_replaced_fabric_config.management.vpcAutoRecoveryTimer }} + ✓ VPC Delay Restore Timer: {{ ebgp_replaced_fabric_config.management.vpcDelayRestoreTimer }} + ✓ VPC Peer Link Port Channel ID: {{ ebgp_replaced_fabric_config.management.vpcPeerLinkPortChannelId }} + ✓ VPC Peer Link VLAN: {{ ebgp_replaced_fabric_config.management.vpcPeerLinkVlan }} + ✓ VPC Domain ID Range: {{ ebgp_replaced_fabric_config.management.vpcDomainIdRange }} + ✓ VPC IPv6 Neighbor Discovery Sync: {{ ebgp_replaced_fabric_config.management.vpcIpv6NeighborDiscoverySync }} + + Gateway & Multicast: + ✓ Anycast Gateway MAC: {{ ebgp_replaced_fabric_config.management.anycastGatewayMac }} + ✓ Multicast Group Subnet: {{ ebgp_replaced_fabric_config.management.multicastGroupSubnet }} + ✓ Rendezvous Point Count: {{ ebgp_replaced_fabric_config.management.rendezvousPointCount }} + ✓ Rendezvous Point Loopback ID: {{ ebgp_replaced_fabric_config.management.rendezvousPointLoopbackId }} + + eBGP-specific: + ✓ BGP AS Mode: {{ ebgp_replaced_fabric_config.management.bgpAsMode }} + ✓ BGP Allow AS In Num: {{ ebgp_replaced_fabric_config.management.bgpAllowAsInNum }} + ✓ BGP Max Path: {{ ebgp_replaced_fabric_config.management.bgpMaxPath }} + + Feature Flags: + ✓ TCAM Allocation: {{ ebgp_replaced_fabric_config.management.tcamAllocation }} + ✓ Real Time Interface Statistics Collection: {{ ebgp_replaced_fabric_config.management.realTimeInterfaceStatisticsCollection }} + ✓ Performance Monitoring: {{ ebgp_replaced_fabric_config.management.performanceMonitoring }} + ✓ Tenant DHCP: {{ ebgp_replaced_fabric_config.management.tenantDhcp }} + ✓ SNMP Trap: {{ ebgp_replaced_fabric_config.management.snmpTrap }} + ✓ Greenfield Debug Flag (eBGP default): {{ ebgp_replaced_fabric_config.management.greenfieldDebugFlag }} + ✓ NXAPI HTTP (ND enforces true for eBGP): {{ ebgp_replaced_fabric_config.management.nxapiHttp }} + ✓ NXAPI: {{ ebgp_replaced_fabric_config.management.nxapi }} + + Auto-Provisioning: + ✓ Per VRF Loopback Auto 
Provision: {{ ebgp_replaced_fabric_config.management.perVrfLoopbackAutoProvision }} + ✓ Per VRF Loopback Auto Provision IPv6: {{ ebgp_replaced_fabric_config.management.perVrfLoopbackAutoProvisionIpv6 }} + + Preserved Settings: + ✓ Banner: "{{ ebgp_replaced_fabric_config.management.banner }}" + + All 35+ expected changes validated successfully! + ======================================== + tags: [test_replaced, test_replaced_validation] + +############################################################################# +# TEST 3: Demonstrate difference between merged and replaced states +############################################################################# +- name: "TEST 3: Create eBGP fabric for merged vs replaced comparison" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: replaced + config: + - "{{ {'fabric_name': ebgp_test_fabric_deleted} | combine(fabric_config_ebgp) }}" + register: ebgp_comparison_fabric_creation + tags: [test_comparison] + +- name: "TEST 3a: Partial update using merged state (should merge changes)" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: merged + config: + - fabric_name: "{{ ebgp_test_fabric_deleted }}" + category: fabric + management: + bgp_asn: "65004" # Different from default ASN + # bgp_asn_auto_allocation: true + bgp_asn_range: "65000-65100" + fabric_mtu: 8000 # Only updating MTU + register: ebgp_merged_partial_result + tags: [test_comparison, test_merged_partial] + +- name: "TEST 3a: Verify merged state preserves existing configuration" + assert: + that: + - ebgp_merged_partial_result is changed + - ebgp_merged_partial_result is not failed + fail_msg: "Partial update with merged state failed" + success_msg: "Merged state successfully performed partial update" + tags: [test_comparison, test_merged_partial] + +- name: "TEST 3b: Partial update using replaced state (should replace entire config)" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: replaced + config: + - fabric_name: "{{ 
ebgp_test_fabric_deleted }}" + category: fabric + management: + type: vxlanEbgp + bgp_asn: "65100" + bgp_asn_auto_allocation: true + bgp_asn_range: "65000-65100" + target_subnet_mask: 30 + register: ebgp_replaced_partial_result + tags: [test_comparison, test_replaced_partial] + +- name: "TEST 3b: Verify replaced state performs complete replacement" + assert: + that: + - ebgp_replaced_partial_result is changed + - ebgp_replaced_partial_result is not failed + fail_msg: "Partial replacement with replaced state failed" + success_msg: "Replaced state successfully performed complete replacement" + tags: [test_comparison, test_replaced_partial] + +############################################################################# +# TEST 4: STATE DELETED - Delete fabrics +############################################################################# +- name: "TEST 4a: Delete eBGP fabric using state deleted" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ebgp_test_fabric_deleted }}" + register: ebgp_deleted_result_1 + tags: [test_deleted, test_deleted_delete] + +- name: "TEST 4a: Verify eBGP fabric was deleted" + assert: + that: + - ebgp_deleted_result_1 is changed + - ebgp_deleted_result_1 is not failed + fail_msg: "eBGP fabric deletion with state deleted failed" + success_msg: "eBGP fabric successfully deleted with state deleted" + tags: [test_deleted, test_deleted_delete] + +- name: "TEST 4b: Delete eBGP fabric using state deleted (second run - idempotency test)" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ebgp_test_fabric_deleted }}" + register: ebgp_deleted_result_2 + tags: [test_deleted, test_deleted_idempotent] + +- name: "TEST 4b: Verify deleted state is idempotent" + assert: + that: + - ebgp_deleted_result_2 is not changed + - ebgp_deleted_result_2 is not failed + fail_msg: "Deleted state is not idempotent - should not change when deleting non-existent fabric" + 
success_msg: "Deleted state is idempotent - no changes when deleting non-existent fabric" + tags: [test_deleted, test_deleted_idempotent] + +############################################################################# +# TEST 5: Multiple fabric operations in single task +############################################################################# +- name: "TEST 5: Multiple eBGP fabric operations in single task" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: merged + config: + - fabric_name: "multi_ebgp_fabric_1" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn: "65101" + bgp_asn_auto_allocation: false + site_id: "65101" + bgp_as_mode: sameTierAS + bgp_allow_as_in_num: 1 + bgp_max_path: 4 + auto_configure_ebgp_evpn_peering: true + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0001" + performance_monitoring: false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: management + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: false + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + 
snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: disable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + bgp_loopback_ip_range: "10.101.0.0/22" + nve_loopback_ip_range: "10.103.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.101.0/24" + intra_fabric_subnet_range: "10.104.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.133.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.105.0.0/22" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + - fabric_name: "multi_ebgp_fabric_2" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn: "65102" + bgp_asn_auto_allocation: false + site_id: "65102" + bgp_as_mode: sameTierAS + bgp_allow_as_in_num: 1 + bgp_max_path: 4 + auto_configure_ebgp_evpn_peering: true + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0002" + performance_monitoring: false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: management + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + 
advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: false + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: disable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + bgp_loopback_ip_range: "10.102.0.0/22" + nve_loopback_ip_range: "10.103.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.102.0/24" + intra_fabric_subnet_range: "10.104.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.134.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.106.0.0/22" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: ebgp_multi_fabric_result + tags: [test_multi, test_multi_create] + +- name: "TEST 5: Verify multiple eBGP fabrics were created" + assert: + that: + - ebgp_multi_fabric_result is changed + - ebgp_multi_fabric_result is not failed + fail_msg: "Multiple eBGP fabric creation failed" + success_msg: "Multiple eBGP fabrics successfully created" + tags: [test_multi, test_multi_create] + +############################################################################# +# FINAL CLEANUP - Clean up all 
test fabrics +############################################################################# +- name: "CLEANUP: Delete all test eBGP fabrics" + cisco.nd.nd_manage_fabric_ebgp: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ebgp_test_fabric_merged }}" + - fabric_name: "{{ ebgp_test_fabric_replaced }}" + - fabric_name: "{{ ebgp_test_fabric_deleted }}" + - fabric_name: "multi_ebgp_fabric_1" + - fabric_name: "multi_ebgp_fabric_2" + ignore_errors: true + tags: [cleanup, always] + +############################################################################# +# TEST SUMMARY +############################################################################# +- name: "TEST SUMMARY: Display eBGP test results" + debug: + msg: | + ======================================================== + TEST SUMMARY for cisco.nd.nd_manage_fabric_ebgp module: + ======================================================== + ✓ TEST 1: STATE MERGED + - Create fabric: {{ 'PASSED' if ebgp_merged_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if ebgp_merged_result_2 is not changed else 'FAILED' }} + - Update fabric: {{ 'PASSED' if ebgp_merged_result_3 is changed else 'FAILED' }} + + ✓ TEST 2: STATE REPLACED + - Create fabric: {{ 'PASSED' if ebgp_replaced_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if ebgp_replaced_result_2 is not changed else 'FAILED' }} + - Replace fabric: {{ 'PASSED' if ebgp_replaced_result_3 is changed else 'FAILED' }} + + ✓ TEST 3: MERGED vs REPLACED Comparison + - Merged partial: {{ 'PASSED' if ebgp_merged_partial_result is changed else 'FAILED' }} + - Replaced partial: {{ 'PASSED' if ebgp_replaced_partial_result is changed else 'FAILED' }} + + ✓ TEST 4: STATE DELETED + - Delete fabric: {{ 'PASSED' if ebgp_deleted_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if ebgp_deleted_result_2 is not changed else 'FAILED' }} + + ✓ TEST 5: MULTIPLE FABRICS + - Multi-create: {{ 'PASSED' if ebgp_multi_fabric_result is 
changed else 'FAILED' }} + + All tests validate: + - State merged: Creates and updates eBGP fabrics by merging changes + - State replaced: Creates and completely replaces eBGP fabric configuration + - State deleted: Removes eBGP fabrics + - Idempotency: All operations are idempotent when run multiple times + - Difference: Merged preserves existing config, replaced overwrites completely + - eBGP-specific: bgpAsMode, bgpAllowAsInNum, bgpMaxPath defaults validated + ======================================================== + tags: [summary, always] diff --git a/tests/integration/targets/nd_manage_fabric/tasks/fabric_external.yaml b/tests/integration/targets/nd_manage_fabric/tasks/fabric_external.yaml new file mode 100644 index 00000000..e5841a81 --- /dev/null +++ b/tests/integration/targets/nd_manage_fabric/tasks/fabric_external.yaml @@ -0,0 +1,719 @@ +--- +# Test code for the ND modules +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Test that we have a Nexus Dashboard host, username and password + ansible.builtin.fail: + msg: 'Please define the following variables: ansible_host, ansible_user and ansible_password.' 
+ when: ansible_host is not defined or ansible_user is not defined or ansible_password is not defined + +- name: Set vars + ansible.builtin.set_fact: + nd_info: &nd_info + output_level: '{{ api_key_output_level | default("debug") }}' + +############################################################################# +# CLEANUP - Ensure clean state before tests +############################################################################# +- name: Clean up any existing test fabrics before starting tests + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ext_test_fabric_merged }}" + - fabric_name: "{{ ext_test_fabric_replaced }}" + - fabric_name: "{{ ext_test_fabric_deleted }}" + tags: always + +############################################################################# +# TEST 1: STATE MERGED - Create fabric using merged state +############################################################################# +- name: "TEST 1a: Create fabric using state merged (first run)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: merged + config: + - "{{ {'fabric_name': ext_test_fabric_merged} | combine(fabric_config_external) }}" + register: ext_merged_result_1 + tags: [test_merged, test_merged_create] + +- name: "TEST 1a: Verify fabric was created using merged state" + assert: + that: + - ext_merged_result_1 is changed + - ext_merged_result_1 is not failed + fail_msg: "Fabric creation with state merged failed" + success_msg: "Fabric successfully created with state merged" + tags: [test_merged, test_merged_create] + +- name: "TEST 1b: Create fabric using state merged (second run - idempotency test)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: merged + config: + - "{{ {'fabric_name': ext_test_fabric_merged} | combine(fabric_config_external) }}" + register: ext_merged_result_2 + tags: [test_merged, test_merged_idempotent] + +- name: "TEST 1b: Verify merged state is idempotent" + assert: + that: + - 
ext_merged_result_2 is not changed + - ext_merged_result_2 is not failed + fail_msg: "Merged state is not idempotent - should not change when run twice with same config" + success_msg: "Merged state is idempotent - no changes on second run" + tags: [test_merged, test_merged_idempotent] + +- name: "TEST 1c: Update fabric using state merged (modify existing)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: merged + config: + - fabric_name: "{{ ext_test_fabric_merged }}" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65002" # Changed from 65001 + copp_policy: strict # Changed from manual + create_bgp_config: true + cdp: true # Changed from false + snmp_trap: false # Changed from true + nxapi: true # Changed from false + nxapi_http: true # Changed from false + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: true # Changed from false + real_time_interface_statistics_collection: true # Changed from false + interface_statistics_load_interval: 30 # Changed from 10 + sub_interface_dot1q_range: "2-511" + power_redundancy_mode: combined # Changed from redundant + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: ext_merged_result_3 + tags: [test_merged, test_merged_update] + +- name: "TEST 1c: Verify fabric was updated using merged state" + assert: + that: + - ext_merged_result_3 is changed + - ext_merged_result_3 is not failed + fail_msg: "Fabric update with state merged failed" + success_msg: "Fabric successfully updated with state merged" + tags: [test_merged, test_merged_update] + +############################################################################# +# VALIDATION: Query ext_test_fabric_merged and 
validate expected changes +############################################################################# +- name: "VALIDATION 1: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: "{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: true + status_code: + - 200 + register: nd_auth_response + tags: [test_merged, test_merged_validation] + delegate_to: localhost + +- name: "VALIDATION 1: Query ext_test_fabric_merged configuration from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ ext_test_fabric_merged }}" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: ext_merged_fabric_query + tags: [test_merged, test_merged_validation] + delegate_to: localhost + +- name: "VALIDATION 1: Parse fabric configuration response" + set_fact: + ext_merged_fabric_config: "{{ ext_merged_fabric_query.json }}" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify BGP ASN was updated to 65002" + assert: + that: + - ext_merged_fabric_config.management.bgpAsn == "65002" + fail_msg: "BGP ASN validation failed. Expected: 65002, Actual: {{ ext_merged_fabric_config.management.bgpAsn }}" + success_msg: "✓ BGP ASN correctly updated to 65002" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify CoPP Policy was updated to strict" + assert: + that: + - ext_merged_fabric_config.management.coppPolicy == "strict" + fail_msg: "CoPP Policy validation failed. 
Expected: strict, Actual: {{ ext_merged_fabric_config.management.coppPolicy }}" + success_msg: "✓ CoPP Policy correctly updated to strict" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify Performance Monitoring was enabled" + assert: + that: + - ext_merged_fabric_config.management.performanceMonitoring == true + fail_msg: "Performance Monitoring validation failed. Expected: true, Actual: {{ ext_merged_fabric_config.management.performanceMonitoring }}" + success_msg: "✓ Performance Monitoring correctly enabled" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Verify CDP was enabled" + assert: + that: + - ext_merged_fabric_config.management.cdp == true + fail_msg: "CDP validation failed. Expected: true, Actual: {{ ext_merged_fabric_config.management.cdp }}" + success_msg: "✓ CDP correctly enabled" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Display successful validation summary for ext_test_fabric_merged" + debug: + msg: | + ======================================== + VALIDATION SUMMARY for ext_test_fabric_merged: + ======================================== + ✓ BGP ASN: {{ ext_merged_fabric_config.management.bgpAsn }} + ✓ CoPP Policy: {{ ext_merged_fabric_config.management.coppPolicy }} + ✓ Performance Monitoring: {{ ext_merged_fabric_config.management.performanceMonitoring }} + ✓ CDP: {{ ext_merged_fabric_config.management.cdp }} + + All 4 expected changes validated successfully! 
+ ======================================== + tags: [test_merged, test_merged_validation] + +############################################################################# +# TEST 2: STATE REPLACED - Create and manage fabric using replaced state +############################################################################# +- name: "TEST 2a: Create fabric using state replaced (first run)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: replaced + config: + - fabric_name: "{{ ext_test_fabric_replaced }}" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65004" + copp_policy: strict # Different from default + create_bgp_config: true + cdp: true # Different from default + snmp_trap: false # Different from default + nxapi: true # Different from default + nxapi_http: true # Different from default + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: true # Different from default + real_time_interface_statistics_collection: true # Different from default + interface_statistics_load_interval: 30 # Different from default + sub_interface_dot1q_range: "2-511" + power_redundancy_mode: combined # Different from default + ptp: true # Different from default + ptp_domain_id: 10 # Different from default + ptp_loopback_id: 5 # Different from default + mpls_handoff: false + mpls_loopback_ip_range: "10.102.0.0/25" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + management_ipv6_prefix: 64 + extra_config_aaa: "" + extra_config_fabric: "" + register: ext_replaced_result_1 + tags: [test_replaced, test_replaced_create] + +- name: "TEST 2a: Verify fabric was created using replaced state" + assert: + that: + - ext_replaced_result_1 is changed + - 
ext_replaced_result_1 is not failed + fail_msg: "Fabric creation with state replaced failed" + success_msg: "Fabric successfully created with state replaced" + tags: [test_replaced, test_replaced_create] + +- name: "TEST 2b: Create fabric using state replaced (second run - idempotency test)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: replaced + config: + - fabric_name: "{{ ext_test_fabric_replaced }}" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65004" + copp_policy: strict + create_bgp_config: true + cdp: true + snmp_trap: false + nxapi: true + nxapi_http: true + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: true + real_time_interface_statistics_collection: true + interface_statistics_load_interval: 30 + sub_interface_dot1q_range: "2-511" + power_redundancy_mode: combined + ptp: true + ptp_domain_id: 10 + ptp_loopback_id: 5 + mpls_handoff: false + mpls_loopback_ip_range: "10.102.0.0/25" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + management_ipv6_prefix: 64 + extra_config_aaa: "" + extra_config_fabric: "" + register: ext_replaced_result_2 + tags: [test_replaced, test_replaced_idempotent] + +- name: "TEST 2b: Verify replaced state is idempotent" + assert: + that: + - ext_replaced_result_2 is not changed + - ext_replaced_result_2 is not failed + fail_msg: "Replaced state is not idempotent - should not change when run twice with same config" + success_msg: "Replaced state is idempotent - no changes on second run" + tags: [test_replaced, test_replaced_idempotent] + +- name: "TEST 2c: Update fabric using state replaced (complete replacement with minimal config)" + cisco.nd.nd_manage_fabric_external: + 
<<: *nd_info + state: replaced + config: + - fabric_name: "{{ ext_test_fabric_replaced }}" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65004" + register: ext_replaced_result_3 + tags: [test_replaced, test_replaced_update] + +- name: "TEST 2c: Verify fabric was completely replaced" + assert: + that: + - ext_replaced_result_3 is changed + - ext_replaced_result_3 is not failed + fail_msg: "Fabric replacement with state replaced failed" + success_msg: "Fabric successfully replaced with state replaced" + tags: [test_replaced, test_replaced_update] + +############################################################################# +# VALIDATION: Query ext_test_fabric_replaced and validate defaults restored +############################################################################# +- name: "VALIDATION 2: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: "{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: true + status_code: + - 200 + register: nd_auth_response_2 + tags: [test_replaced, test_replaced_validation] + delegate_to: localhost + +- name: "VALIDATION 2: Query ext_test_fabric_replaced configuration from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ ext_test_fabric_replaced }}" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response_2.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: 
ext_replaced_fabric_query + tags: [test_replaced, test_replaced_validation] + delegate_to: localhost + +- name: "VALIDATION 2: Parse fabric configuration response" + set_fact: + ext_replaced_fabric_config: "{{ ext_replaced_fabric_query.json }}" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify CoPP Policy was standardized to manual (default)" + assert: + that: + - ext_replaced_fabric_config.management.coppPolicy == "manual" + fail_msg: "CoPP Policy validation failed. Expected: manual, Actual: {{ ext_replaced_fabric_config.management.coppPolicy }}" + success_msg: "✓ CoPP Policy correctly standardized to manual" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify SNMP Trap was restored to default (true)" + assert: + that: + - ext_replaced_fabric_config.management.snmpTrap == true + fail_msg: "SNMP Trap validation failed. Expected: true, Actual: {{ ext_replaced_fabric_config.management.snmpTrap }}" + success_msg: "✓ SNMP Trap correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify CDP was restored to default (false)" + assert: + that: + - ext_replaced_fabric_config.management.cdp == false + fail_msg: "CDP validation failed. Expected: false, Actual: {{ ext_replaced_fabric_config.management.cdp }}" + success_msg: "✓ CDP correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify NXAPI was restored to default (false)" + assert: + that: + - ext_replaced_fabric_config.management.nxapi == false + fail_msg: "NXAPI validation failed. 
Expected: false, Actual: {{ ext_replaced_fabric_config.management.nxapi }}" + success_msg: "✓ NXAPI correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify NXAPI HTTP was restored to default (false)" + assert: + that: + - ext_replaced_fabric_config.management.nxapiHttp == false + fail_msg: "NXAPI HTTP validation failed. Expected: false, Actual: {{ ext_replaced_fabric_config.management.nxapiHttp }}" + success_msg: "✓ NXAPI HTTP correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Performance Monitoring was restored to default (false)" + assert: + that: + - ext_replaced_fabric_config.management.performanceMonitoring == false + fail_msg: "Performance Monitoring validation failed. Expected: false, Actual: {{ ext_replaced_fabric_config.management.performanceMonitoring }}" + success_msg: "✓ Performance Monitoring correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Real Time Interface Statistics Collection was restored to default (false)" + assert: + that: + - ext_replaced_fabric_config.management.realTimeInterfaceStatisticsCollection == false + fail_msg: "Real Time Interface Statistics Collection validation failed. Expected: false, Actual: {{ ext_replaced_fabric_config.management.realTimeInterfaceStatisticsCollection }}" + success_msg: "✓ Real Time Interface Statistics Collection correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify Power Redundancy Mode was restored to default (redundant)" + assert: + that: + - ext_replaced_fabric_config.management.powerRedundancyMode == "redundant" + fail_msg: "Power Redundancy Mode validation failed. 
Expected: redundant, Actual: {{ ext_replaced_fabric_config.management.powerRedundancyMode }}" + success_msg: "✓ Power Redundancy Mode correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Verify PTP was restored to default (false)" + assert: + that: + - ext_replaced_fabric_config.management.ptp == false + fail_msg: "PTP validation failed. Expected: false, Actual: {{ ext_replaced_fabric_config.management.ptp }}" + success_msg: "✓ PTP correctly restored to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Display successful validation summary for ext_test_fabric_replaced" + debug: + msg: | + ======================================== + VALIDATION SUMMARY for ext_test_fabric_replaced: + ======================================== + ✓ CoPP Policy: {{ ext_replaced_fabric_config.management.coppPolicy }} + ✓ SNMP Trap: {{ ext_replaced_fabric_config.management.snmpTrap }} + ✓ CDP: {{ ext_replaced_fabric_config.management.cdp }} + ✓ NXAPI: {{ ext_replaced_fabric_config.management.nxapi }} + ✓ NXAPI HTTP: {{ ext_replaced_fabric_config.management.nxapiHttp }} + ✓ Performance Monitoring: {{ ext_replaced_fabric_config.management.performanceMonitoring }} + ✓ Real Time Interface Statistics: {{ ext_replaced_fabric_config.management.realTimeInterfaceStatisticsCollection }} + ✓ Power Redundancy Mode: {{ ext_replaced_fabric_config.management.powerRedundancyMode }} + ✓ PTP: {{ ext_replaced_fabric_config.management.ptp }} + + All defaults correctly restored after replaced with minimal config! 
+ ======================================== + tags: [test_replaced, test_replaced_validation] + +############################################################################# +# TEST 3: Demonstrate difference between merged and replaced states +############################################################################# +- name: "TEST 3: Create fabric for merged vs replaced comparison" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: replaced + config: + - "{{ {'fabric_name': ext_test_fabric_deleted} | combine(fabric_config_external) }}" + register: ext_comparison_fabric_creation + tags: [test_comparison] + +- name: "TEST 3a: Partial update using merged state (should merge changes)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: merged + config: + - fabric_name: "{{ ext_test_fabric_deleted }}" + category: fabric + management: + bgp_asn: "65099" # Only updating ASN + copp_policy: strict # Only updating CoPP policy + register: ext_merged_partial_result + tags: [test_comparison, test_merged_partial] + +- name: "TEST 3a: Verify merged state preserves existing configuration" + assert: + that: + - ext_merged_partial_result is changed + - ext_merged_partial_result is not failed + fail_msg: "Partial update with merged state failed" + success_msg: "Merged state successfully performed partial update" + tags: [test_comparison, test_merged_partial] + +- name: "TEST 3b: Partial update using replaced state (should replace entire config)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: replaced + config: + - fabric_name: "{{ ext_test_fabric_deleted }}" + category: fabric + management: + type: externalConnectivity + bgp_asn: "65100" # Only specifying minimal config for replaced + register: ext_replaced_partial_result + tags: [test_comparison, test_replaced_partial] + +- name: "TEST 3b: Verify replaced state performs complete replacement" + assert: + that: + - ext_replaced_partial_result is changed + - ext_replaced_partial_result is not 
failed + fail_msg: "Partial replacement with replaced state failed" + success_msg: "Replaced state successfully performed complete replacement" + tags: [test_comparison, test_replaced_partial] + +############################################################################# +# TEST 4: STATE DELETED - Delete fabrics +############################################################################# +- name: "TEST 4a: Delete fabric using state deleted" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ext_test_fabric_deleted }}" + register: ext_deleted_result_1 + tags: [test_deleted, test_deleted_delete] + +- name: "TEST 4a: Verify fabric was deleted" + assert: + that: + - ext_deleted_result_1 is changed + - ext_deleted_result_1 is not failed + fail_msg: "Fabric deletion with state deleted failed" + success_msg: "Fabric successfully deleted with state deleted" + tags: [test_deleted, test_deleted_delete] + +- name: "TEST 4b: Delete fabric using state deleted (second run - idempotency test)" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ext_test_fabric_deleted }}" + register: ext_deleted_result_2 + tags: [test_deleted, test_deleted_idempotent] + +- name: "TEST 4b: Verify deleted state is idempotent" + assert: + that: + - ext_deleted_result_2 is not changed + - ext_deleted_result_2 is not failed + fail_msg: "Deleted state is not idempotent - should not change when deleting non-existent fabric" + success_msg: "Deleted state is idempotent - no changes when deleting non-existent fabric" + tags: [test_deleted, test_deleted_idempotent] + +############################################################################# +# TEST 5: Multiple fabric operations in single task +############################################################################# +- name: "TEST 5: Multiple fabric operations in single task" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: merged + 
config: + - fabric_name: "ext_multi_fabric_1" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65101" + copp_policy: manual + create_bgp_config: true + cdp: false + snmp_trap: true + nxapi: false + nxapi_http: false + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: false + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + sub_interface_dot1q_range: "2-511" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + - fabric_name: "ext_multi_fabric_2" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65102" + copp_policy: manual + create_bgp_config: true + cdp: false + snmp_trap: true + nxapi: false + nxapi_http: false + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: false + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + sub_interface_dot1q_range: "2-511" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: ext_multi_fabric_result + tags: [test_multi, test_multi_create] + +- name: "TEST 5: Verify multiple fabrics were created" + assert: + that: + - ext_multi_fabric_result is changed + - ext_multi_fabric_result is not failed + fail_msg: "Multiple fabric creation failed" + success_msg: "Multiple fabrics successfully created" + tags: [test_multi, test_multi_create] + 
+############################################################################# +# FINAL CLEANUP - Clean up all test fabrics +############################################################################# +- name: "CLEANUP: Delete all test fabrics" + cisco.nd.nd_manage_fabric_external: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ ext_test_fabric_merged }}" + - fabric_name: "{{ ext_test_fabric_replaced }}" + - fabric_name: "{{ ext_test_fabric_deleted }}" + - fabric_name: "ext_multi_fabric_1" + - fabric_name: "ext_multi_fabric_2" + ignore_errors: true + tags: [cleanup, always] + +############################################################################# +# TEST SUMMARY +############################################################################# +- name: "TEST SUMMARY: Display test results" + debug: + msg: | + ======================================================== + TEST SUMMARY for cisco.nd.nd_manage_fabric_external module: + ======================================================== + ✓ TEST 1: STATE MERGED + - Create fabric: {{ 'PASSED' if ext_merged_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if ext_merged_result_2 is not changed else 'FAILED' }} + - Update fabric: {{ 'PASSED' if ext_merged_result_3 is changed else 'FAILED' }} + + ✓ TEST 2: STATE REPLACED + - Create fabric: {{ 'PASSED' if ext_replaced_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if ext_replaced_result_2 is not changed else 'FAILED' }} + - Replace fabric: {{ 'PASSED' if ext_replaced_result_3 is changed else 'FAILED' }} + + ✓ TEST 3: MERGED vs REPLACED Comparison + - Merged partial: {{ 'PASSED' if ext_merged_partial_result is changed else 'FAILED' }} + - Replaced partial: {{ 'PASSED' if ext_replaced_partial_result is changed else 'FAILED' }} + + ✓ TEST 4: STATE DELETED + - Delete fabric: {{ 'PASSED' if ext_deleted_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if ext_deleted_result_2 is not changed else 'FAILED' }} + 
+      ✓ TEST 5: MULTIPLE FABRICS
+        - Multi-create: {{ 'PASSED' if ext_multi_fabric_result is changed else 'FAILED' }}
+
+      All tests validate:
+      - State merged: Creates and updates fabrics by merging changes
+      - State replaced: Creates and completely replaces fabric configuration
+      - State deleted: Removes fabrics
+      - Idempotency: All operations are idempotent when run multiple times
+      - Difference: Merged preserves existing config, replaced overwrites completely
+      ========================================================
+  tags: [summary, always]
diff --git a/tests/integration/targets/nd_manage_fabric/tasks/fabric_ibgp.yaml b/tests/integration/targets/nd_manage_fabric/tasks/fabric_ibgp.yaml
new file mode 100644
index 00000000..733cd35a
--- /dev/null
+++ b/tests/integration/targets/nd_manage_fabric/tasks/fabric_ibgp.yaml
@@ -0,0 +1,1332 @@
+---
+# Test code for the ND modules
+# Copyright: (c) 2026, Mike Wiebe (@mwiebe)
+
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Test that we have a Nexus Dashboard host, username and password
+  ansible.builtin.fail:
+    msg: 'Please define the following variables: ansible_host, ansible_user and ansible_password.'
+ when: ansible_host is not defined or ansible_user is not defined or ansible_password is not defined + +- name: Set vars + ansible.builtin.set_fact: + nd_info: &nd_info + output_level: '{{ api_key_output_level | default("debug") }}' + +############################################################################# +# CLEANUP - Ensure clean state before tests +############################################################################# +- name: Clean up any existing test fabrics before starting tests + cisco.nd.nd_manage_fabric_ibgp: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ test_fabric_merged }}" + - fabric_name: "{{ test_fabric_replaced }}" + tags: always + +############################################################################# +# TEST 1: STATE MERGED - Create fabric using merged state +############################################################################# +- name: "TEST 1a: Create fabric using state merged (first run)" + cisco.nd.nd_manage_fabric_ibgp: + <<: *nd_info + state: merged + config: + - "{{ {'fabric_name': test_fabric_merged} | combine(fabric_config_ibgp) }}" + register: merged_result_1 + tags: [test_merged, test_merged_create] + +- name: "TEST 1a: Verify fabric was created using merged state" + assert: + that: + - merged_result_1 is changed + - merged_result_1 is not failed + fail_msg: "Fabric creation with state merged failed" + success_msg: "Fabric successfully created with state merged" + tags: [test_merged, test_merged_create] + +- name: "TEST 1b: Create fabric using state merged (second run - idempotency test)" + cisco.nd.nd_manage_fabric_ibgp: + <<: *nd_info + state: merged + config: + - "{{ {'fabric_name': test_fabric_merged} | combine(fabric_config_ibgp) }}" + register: merged_result_2 + tags: [test_merged, test_merged_idempotent] + +- name: "TEST 1b: Verify merged state is idempotent" + assert: + that: + - merged_result_2 is not changed + - merged_result_2 is not failed + fail_msg: "Merged state is not 
idempotent - should not change when run twice with same config" + success_msg: "Merged state is idempotent - no changes on second run" + tags: [test_merged, test_merged_idempotent] + +- name: "TEST 1c: Update fabric using state merged (modify existing)" + cisco.nd.nd_manage_fabric_ibgp: + <<: *nd_info + state: merged + config: + - fabric_name: "{{ test_fabric_merged }}" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65002" # Changed from 65001 + site_id: "65002" # Changed from 65001 + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00bb" # Changed from 00aa + performance_monitoring: true # Changed from false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: loopback + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: true + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: enable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + 
bgp_loopback_ip_range: "10.2.0.0/22" + nve_loopback_ip_range: "10.3.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.254.0/24" + intra_fabric_subnet_range: "10.4.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.33.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.5.0.0/22" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + register: merged_result_3 + tags: [test_merged, test_merged_update] + +- name: "TEST 1c: Verify fabric was updated using merged state" + assert: + that: + - merged_result_3 is changed + - merged_result_3 is not failed + fail_msg: "Fabric update with state merged failed" + success_msg: "Fabric successfully updated with state merged" + tags: [test_merged, test_merged_update] + +############################################################################# +# VALIDATION: Query test_fabric_merged and validate expected changes +############################################################################# +# Get authentication token first +- name: "VALIDATION 1: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: "{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: true + status_code: + - 200 + register: nd_auth_response + tags: [test_merged, test_merged_validation] + delegate_to: localhost + +- name: "VALIDATION 1: Query 
test_fabric_merged configuration from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ test_fabric_merged }}" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: merged_fabric_query + tags: [test_merged, test_merged_validation] + delegate_to: localhost + +- name: "VALIDATION 1: Parse fabric configuration response" + set_fact: + merged_fabric_config: "{{ merged_fabric_query.json }}" + tags: [test_merged, test_merged_validation] + +# +# Category 1: Properties CHANGED by TEST 1c merge +# +- name: "VALIDATION 1a: Verify changed properties after merge" + assert: + that: + - merged_fabric_config.management.bgpAsn == "65002" + - merged_fabric_config.management.siteId == "65002" + - merged_fabric_config.management.anycastGatewayMac == "2020.0000.00bb" + - merged_fabric_config.management.performanceMonitoring == true + fail_msg: >- + Changed properties validation failed. 
+ bgpAsn: {{ merged_fabric_config.management.bgpAsn }} (expected 65002), + siteId: {{ merged_fabric_config.management.siteId }} (expected 65002), + anycastGatewayMac: {{ merged_fabric_config.management.anycastGatewayMac }} (expected 2020.0000.00bb), + performanceMonitoring: {{ merged_fabric_config.management.performanceMonitoring }} (expected true) + success_msg: "✓ All 4 changed properties updated correctly (bgpAsn, siteId, anycastGatewayMac, performanceMonitoring)" + tags: [test_merged, test_merged_validation] + +# +# Category 2: Properties re-specified in TEST 1c with same values +# +- name: "VALIDATION 1b: Verify re-specified management properties (same values)" + assert: + that: + # Core + - merged_fabric_config.management.targetSubnetMask == 30 + - merged_fabric_config.management.fabricMtu == 9216 + - merged_fabric_config.management.l2HostInterfaceMtu == 9216 + - merged_fabric_config.management.l3VniNoVlanDefaultOption == false + # Multicast / Replication + - merged_fabric_config.management.replicationMode == "multicast" + - merged_fabric_config.management.multicastGroupSubnet == "239.1.1.0/25" + - merged_fabric_config.management.autoGenerateMulticastGroupAddress == false + - merged_fabric_config.management.underlayMulticastGroupAddressLimit == 128 + - merged_fabric_config.management.tenantRoutedMulticast == false + - merged_fabric_config.management.rendezvousPointCount == 2 + - merged_fabric_config.management.rendezvousPointLoopbackId == 254 + # vPC + - merged_fabric_config.management.vpcPeerLinkVlan == "3600" + - merged_fabric_config.management.vpcPeerLinkEnableNativeVlan == false + - merged_fabric_config.management.vpcPeerKeepAliveOption == "loopback" + - merged_fabric_config.management.vpcAutoRecoveryTimer == 360 + - merged_fabric_config.management.vpcDelayRestoreTimer == 150 + - merged_fabric_config.management.vpcPeerLinkPortChannelId == "500" + # Loopback / Domain IDs + - merged_fabric_config.management.vpcDomainIdRange == "1-1000" + - 
merged_fabric_config.management.bgpLoopbackId == 0 + - merged_fabric_config.management.nveLoopbackId == 1 + # Templates + - merged_fabric_config.management.vrfTemplate == "Default_VRF_Universal" + - merged_fabric_config.management.networkTemplate == "Default_Network_Universal" + - merged_fabric_config.management.vrfExtensionTemplate == "Default_VRF_Extension_Universal" + - merged_fabric_config.management.networkExtensionTemplate == "Default_Network_Extension_Universal" + fail_msg: "Re-specified management properties validation failed" + success_msg: "✓ All 24 re-specified management properties match expected values" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1b: Verify re-specified IP ranges and VNI ranges" + assert: + that: + # IP Ranges + - merged_fabric_config.management.bgpLoopbackIpRange == "10.2.0.0/22" + - merged_fabric_config.management.nveLoopbackIpRange == "10.3.0.0/22" + - merged_fabric_config.management.anycastRendezvousPointIpRange == "10.254.254.0/24" + - merged_fabric_config.management.intraFabricSubnetRange == "10.4.0.0/16" + # VNI / VLAN Ranges + - merged_fabric_config.management.l2VniRange == "30000-49000" + - merged_fabric_config.management.l3VniRange == "50000-59000" + - merged_fabric_config.management.networkVlanRange == "2300-2999" + - merged_fabric_config.management.vrfVlanRange == "2000-2299" + - merged_fabric_config.management.subInterfaceDot1qRange == "2-511" + # VRF Lite / DCI + - merged_fabric_config.management.vrfLiteAutoConfig == "manual" + - merged_fabric_config.management.vrfLiteSubnetRange == "10.33.0.0/16" + - merged_fabric_config.management.vrfLiteSubnetTargetMask == 30 + - merged_fabric_config.management.autoUniqueVrfLiteIpPrefix == false + # Per-VRF Loopback + - merged_fabric_config.management.perVrfLoopbackAutoProvision == true + - merged_fabric_config.management.perVrfLoopbackIpRange == "10.5.0.0/22" + fail_msg: "Re-specified IP/VNI ranges validation failed" + success_msg: "✓ All 15 re-specified IP 
and VNI range properties match expected values" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1b: Verify re-specified system and NX-API settings" + assert: + that: + - merged_fabric_config.management.tenantDhcp == true + - merged_fabric_config.management.nxapi == true + - merged_fabric_config.management.nxapiHttpsPort == 443 + - merged_fabric_config.management.nxapiHttp == false + - merged_fabric_config.management.nxapiHttpPort == 80 + - merged_fabric_config.management.snmpTrap == true + - merged_fabric_config.management.advertisePhysicalIp == false + - merged_fabric_config.management.anycastBorderGatewayAdvertisePhysicalIp == false + - merged_fabric_config.management.greenfieldDebugFlag == "enable" + - merged_fabric_config.management.tcamAllocation == true + - merged_fabric_config.management.realTimeInterfaceStatisticsCollection == false + # Bootstrap / DHCP + - merged_fabric_config.management.day0Bootstrap == false + - merged_fabric_config.management.localDhcpServer == false + fail_msg: "Re-specified system/NX-API settings validation failed" + success_msg: "✓ All 13 re-specified system and NX-API properties match expected values" + tags: [test_merged, test_merged_validation] + +# +# Category 3: Properties NOT in TEST 1c - MUST be preserved from original create +# These are the critical assertions for validating the merge fix. +# Prior to the fix, these would be reset to Pydantic model defaults. 
+# +- name: "VALIDATION 1c: Verify preserved underlay/overlay config (not in merge task)" + assert: + that: + - merged_fabric_config.management.overlayMode == "cli" + - merged_fabric_config.management.underlayIpv6 == false + - merged_fabric_config.management.fabricInterfaceType == "p2p" + - merged_fabric_config.management.linkStateRoutingProtocol == "ospf" + - merged_fabric_config.management.ospfAreaId == "0.0.0.0" + - merged_fabric_config.management.routeReflectorCount == 4 + - merged_fabric_config.management.staticUnderlayIpAllocation == false + fail_msg: >- + Preserved underlay/overlay config validation failed. These fields were not + in the merge task and should retain their original values from creation. + success_msg: "✓ All 7 preserved underlay/overlay properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved multicast/RP settings (not in merge task)" + assert: + that: + - merged_fabric_config.management.tenantRoutedMulticastIpv6 == false + - merged_fabric_config.management.rendezvousPointMode == "asm" + - merged_fabric_config.management.pimHelloAuthentication == false + fail_msg: >- + Preserved multicast/RP settings validation failed. These fields were not + in the merge task and should retain their original values. + success_msg: "✓ All 3 preserved multicast/RP properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved vPC extended settings (not in merge task)" + assert: + that: + - merged_fabric_config.management.vpcIpv6NeighborDiscoverySync == true + - merged_fabric_config.management.vpcLayer3PeerRouter == true + - merged_fabric_config.management.vpcTorDelayRestoreTimer == 30 + - merged_fabric_config.management.fabricVpcDomainId == false + - merged_fabric_config.management.fabricVpcQos == false + - merged_fabric_config.management.enablePeerSwitch == false + fail_msg: >- + Preserved vPC extended settings validation failed. 
These fields were not + in the merge task and should retain their original values. + success_msg: "✓ All 6 preserved vPC extended properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved advertising and protocol auth (not in merge task)" + assert: + that: + - merged_fabric_config.management.advertisePhysicalIpOnBorder == true + - merged_fabric_config.management.bgpAuthentication == false + - merged_fabric_config.management.ospfAuthentication == false + - merged_fabric_config.management.bfd == false + - merged_fabric_config.management.macsec == false + - merged_fabric_config.management.vrfLiteMacsec == false + fail_msg: >- + Preserved advertising/protocol auth validation failed. These fields were + not in the merge task and should retain their original values. + success_msg: "✓ All 6 preserved advertising and protocol authentication properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved BGP/routing enhancements (not in merge task)" + assert: + that: + - merged_fabric_config.management.autoBgpNeighborDescription == true + - merged_fabric_config.management.linkStateRoutingTag == "UNDERLAY" + fail_msg: >- + Preserved BGP/routing enhancements validation failed. + success_msg: "✓ All 2 preserved BGP/routing enhancement properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved resource ID ranges (not in merge task)" + assert: + that: + - merged_fabric_config.management.ipServiceLevelAgreementIdRange == "10000-19999" + - merged_fabric_config.management.objectTrackingNumberRange == "100-299" + - merged_fabric_config.management.serviceNetworkVlanRange == "3000-3199" + - merged_fabric_config.management.routeMapSequenceNumberRange == "1-65534" + - merged_fabric_config.management.perVrfLoopbackAutoProvisionIpv6 == false + fail_msg: >- + Preserved resource ID ranges validation failed. 
These fields were not + in the merge task and should retain their original values. + success_msg: "✓ All 5 preserved resource ID range properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved system policies (not in merge task)" + assert: + that: + - merged_fabric_config.management.cdp == false + - merged_fabric_config.management.inbandManagement == false + - merged_fabric_config.management.securityGroupTag == false + - merged_fabric_config.management.privateVlan == false + - merged_fabric_config.management.defaultQueuingPolicy == false + - merged_fabric_config.management.aimlQos == false + - merged_fabric_config.management.dlb == false + - merged_fabric_config.management.aiLoadSharing == false + - merged_fabric_config.management.ptp == false + - merged_fabric_config.management.stpRootOption == "unmanaged" + - merged_fabric_config.management.mplsHandoff == false + fail_msg: >- + Preserved system policies validation failed. These fields were not + in the merge task and should retain their original values. + success_msg: "✓ All 11 preserved system policy properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved OAM/compliance/system settings (not in merge task)" + assert: + that: + - merged_fabric_config.management.allowVlanOnLeafTorPairing == "none" + - merged_fabric_config.management.leafTorIdRange == false + - merged_fabric_config.management.nveHoldDownTimer == 180 + - merged_fabric_config.management.strictConfigComplianceMode == false + - merged_fabric_config.management.advancedSshOption == false + - merged_fabric_config.management.coppPolicy == "strict" + - merged_fabric_config.management.powerRedundancyMode == "redundant" + - merged_fabric_config.management.hostInterfaceAdminState == true + - merged_fabric_config.management.policyBasedRouting == false + fail_msg: >- + Preserved OAM/compliance/system settings validation failed. 
+ success_msg: "✓ All 9 preserved OAM/compliance/system properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved banner (not in merge task - critical merge fix test)" + assert: + that: + - merged_fabric_config.management.banner is defined + - merged_fabric_config.management.banner | length > 0 + - "'ADVISORY' in merged_fabric_config.management.banner" + fail_msg: >- + CRITICAL: Banner was reset to empty by the merge operation! + The banner field was not specified in the merge task and must be + preserved from the original create. Expected banner containing + 'ADVISORY', Actual: '{{ merged_fabric_config.management.banner | default("") }}' + success_msg: "✓ Banner correctly preserved after merge: {{ merged_fabric_config.management.banner }}" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1c: Verify preserved backup and brownfield settings (not in merge task)" + assert: + that: + - merged_fabric_config.management.realTimeBackup == false + - merged_fabric_config.management.scheduledBackup == false + - merged_fabric_config.management.brownfieldNetworkNameFormat == "Auto_Net_VNI$$VNI$$_VLAN$$VLAN_ID$$" + - merged_fabric_config.management.brownfieldSkipOverlayNetworkAttachments == false + - merged_fabric_config.management.allowSmartSwitchOnboarding == false + fail_msg: >- + Preserved backup/brownfield settings validation failed. 
+ success_msg: "✓ All 5 preserved backup and brownfield properties retained correctly" + tags: [test_merged, test_merged_validation] + +- name: "VALIDATION 1: Display comprehensive validation summary" + debug: + msg: | + ============================================================ + COMPREHENSIVE VALIDATION SUMMARY for test_fabric_merged + ============================================================ + Category 1 - Changed properties (4 fields): + ✓ bgpAsn: {{ merged_fabric_config.management.bgpAsn }} + ✓ siteId: {{ merged_fabric_config.management.siteId }} + ✓ anycastGatewayMac: {{ merged_fabric_config.management.anycastGatewayMac }} + ✓ performanceMonitoring: {{ merged_fabric_config.management.performanceMonitoring }} + + Category 2 - Re-specified properties (52 fields): + ✓ Management, IP ranges, VNI ranges, system, NX-API, bootstrap + + Category 3 - Preserved properties NOT in merge task (54 fields): + ✓ Underlay/overlay config (7 fields) + ✓ Multicast/RP settings (3 fields) + ✓ vPC extended settings (6 fields) + ✓ Advertising & protocol auth (6 fields) + ✓ BGP/routing enhancements (2 fields) + ✓ Resource ID ranges (5 fields) + ✓ System policies (11 fields) + ✓ OAM/compliance/system (9 fields) + ✓ Banner: {{ merged_fabric_config.management.banner }} + ✓ Backup & brownfield (5 fields) + + Total: 110 properties validated across all categories! 
+      ============================================================
+  tags: [test_merged, test_merged_validation]
+
+#############################################################################
+# TEST 2: STATE REPLACED - Create and manage fabric using replaced state
+#############################################################################
+- name: "TEST 2a: Create fabric using state replaced (first run)"
+  cisco.nd.nd_manage_fabric_ibgp:
+    <<: *nd_info
+    state: replaced
+    config:
+      - fabric_name: "{{ test_fabric_replaced }}"
+        category: fabric
+        location:
+          latitude: 37.7749
+          longitude: -122.4194
+        license_tier: premier
+        alert_suspend: disabled
+        security_domain: all
+        telemetry_collection: false
+        management:
+          type: vxlanIbgp
+          bgp_asn: "65004" # Different from default ASN
+          site_id: "65004" # Different from default site_id
+          target_subnet_mask: 30
+          anycast_gateway_mac: "2020.0000.00dd" # Different from default MAC
+          performance_monitoring: true # Different from default to true
+          replication_mode: multicast
+          multicast_group_subnet: "239.1.3.0/25" # Different from default subnet
+          auto_generate_multicast_group_address: false
+          underlay_multicast_group_address_limit: 128
+          tenant_routed_multicast: false
+          rendezvous_point_count: 4 # Different from default count
+          rendezvous_point_loopback_id: 253 # Different from default loopback
+          vpc_peer_link_vlan: "3700" # Different from default VLAN
+          vpc_peer_link_enable_native_vlan: false
+          vpc_peer_keep_alive_option: loopback
+          vpc_auto_recovery_timer: 300 # Different from default timer
+          vpc_delay_restore_timer: 120 # Different from default timer
+          vpc_peer_link_port_channel_id: "600" # Different from default port channel
+          vpc_ipv6_neighbor_discovery_sync: false # Different from default to false
+          advertise_physical_ip: true # Different from default to true
+          vpc_domain_id_range: "1-800" # Different from default range
+          bgp_loopback_id: 0
+          nve_loopback_id: 1
+          vrf_template: Default_VRF_Universal
+          network_template: Default_Network_Universal
+          vrf_extension_template: Default_VRF_Extension_Universal
+          network_extension_template: Default_Network_Extension_Universal
+          l3_vni_no_vlan_default_option: false
+          fabric_mtu: 9000 # Different from default MTU
+          l2_host_interface_mtu: 9000 # Different from default MTU
+          tenant_dhcp: false # Different from default to false
+          nxapi: false # Different from default to false
+          nxapi_https_port: 443
+          nxapi_http: true # Different from default to true
+          nxapi_http_port: 80
+          snmp_trap: false # Different from default to false
+          anycast_border_gateway_advertise_physical_ip: true # Different from default to true
+          greenfield_debug_flag: disable # Different from default to disable
+          tcam_allocation: false # Different from default to false
+          real_time_interface_statistics_collection: true # Different from default to true
+          interface_statistics_load_interval: 30 # Different from default interval
+          bgp_loopback_ip_range: "10.22.0.0/22" # Different from default range
+          nve_loopback_ip_range: "10.23.0.0/22" # Different from default range
+          anycast_rendezvous_point_ip_range: "10.254.252.0/24" # Different from default range
+          intra_fabric_subnet_range: "10.24.0.0/16" # Different from default range
+          l2_vni_range: "40000-59000" # Different from default range
+          l3_vni_range: "60000-69000" # Different from default range
+          network_vlan_range: "2400-3099" # Different from default range
+          vrf_vlan_range: "2100-2399" # Different from default range
+          sub_interface_dot1q_range: "2-511"
+          vrf_lite_auto_config: manual
+          vrf_lite_subnet_range: "10.53.0.0/16" # Different from default range
+          vrf_lite_subnet_target_mask: 30
+          auto_unique_vrf_lite_ip_prefix: false
+          per_vrf_loopback_auto_provision: true
+          per_vrf_loopback_ip_range: "10.25.0.0/22" # Different from default range
+          per_vrf_loopback_auto_provision_ipv6: true
+          per_vrf_loopback_ipv6_range: "fd00::a25:0/112" # Different from default range
+          banner: "^ Updated via replaced state ^" # Added banner
+          day0_bootstrap: false
+          local_dhcp_server: false
+          dhcp_protocol_version: dhcpv4
+          dhcp_start_address: ""
+          dhcp_end_address: ""
+          management_gateway: ""
+          management_ipv4_prefix: 24
+          management_ipv6_prefix: 64
+  register: replaced_result_1
+  tags: [test_replaced, test_replaced_create]
+
+- name: "TEST 2a: Verify fabric was created using replaced state"
+  assert:
+    that:
+      - replaced_result_1 is changed
+      - replaced_result_1 is not failed
+    fail_msg: "Fabric creation with state replaced failed"
+    success_msg: "Fabric successfully created with state replaced"
+  tags: [test_replaced, test_replaced_create]
+
+- name: "TEST 2b: Create fabric using state replaced (second run - idempotency test)"
+  cisco.nd.nd_manage_fabric_ibgp:
+    <<: *nd_info
+    state: replaced
+    config:
+      - fabric_name: "{{ test_fabric_replaced }}"
+        category: fabric
+        location:
+          latitude: 37.7749
+          longitude: -122.4194
+        license_tier: premier
+        alert_suspend: disabled
+        security_domain: all
+        telemetry_collection: false
+        management:
+          type: vxlanIbgp
+          bgp_asn: "65004" # Different from default ASN
+          site_id: "65004" # Different from default site_id
+          target_subnet_mask: 30
+          anycast_gateway_mac: "2020.0000.00dd" # Different from default MAC
+          performance_monitoring: true # Different from default to true
+          replication_mode: multicast
+          multicast_group_subnet: "239.1.3.0/25" # Different from default subnet
+          auto_generate_multicast_group_address: false
+          underlay_multicast_group_address_limit: 128
+          tenant_routed_multicast: false
+          rendezvous_point_count: 4 # Different from default count
+          rendezvous_point_loopback_id: 253 # Different from default loopback
+          vpc_peer_link_vlan: "3700" # Different from default VLAN
+          vpc_peer_link_enable_native_vlan: false
+          vpc_peer_keep_alive_option: loopback
+          vpc_auto_recovery_timer: 300 # Different from default timer
+          vpc_delay_restore_timer: 120 # Different from default timer
+          vpc_peer_link_port_channel_id: "600" # Different from default port channel
+          vpc_ipv6_neighbor_discovery_sync: false # Different from default to false
+          advertise_physical_ip: true # Different from default to true
+          vpc_domain_id_range: "1-800" # Different from default range
+          bgp_loopback_id: 0
+          nve_loopback_id: 1
+          vrf_template: Default_VRF_Universal
+          network_template: Default_Network_Universal
+          vrf_extension_template: Default_VRF_Extension_Universal
+          network_extension_template: Default_Network_Extension_Universal
+          l3_vni_no_vlan_default_option: false
+          fabric_mtu: 9000 # Different from default MTU
+          l2_host_interface_mtu: 9000 # Different from default MTU
+          tenant_dhcp: false # Different from default to false
+          nxapi: false # Different from default to false
+          nxapi_https_port: 443
+          nxapi_http: true # Different from default to true
+          nxapi_http_port: 80
+          snmp_trap: false # Different from default to false
+          anycast_border_gateway_advertise_physical_ip: true # Different from default to true
+          greenfield_debug_flag: disable # Different from default to disable
+          tcam_allocation: false # Different from default to false
+          real_time_interface_statistics_collection: true # Different from default to true
+          interface_statistics_load_interval: 30 # Different from default interval
+          bgp_loopback_ip_range: "10.22.0.0/22" # Different from default range
+          nve_loopback_ip_range: "10.23.0.0/22" # Different from default range
+          anycast_rendezvous_point_ip_range: "10.254.252.0/24" # Different from default range
+          intra_fabric_subnet_range: "10.24.0.0/16" # Different from default range
+          l2_vni_range: "40000-59000" # Different from default range
+          l3_vni_range: "60000-69000" # Different from default range
+          network_vlan_range: "2400-3099" # Different from default range
+          vrf_vlan_range: "2100-2399" # Different from default range
+          sub_interface_dot1q_range: "2-511"
+          vrf_lite_auto_config: manual
+          vrf_lite_subnet_range: "10.53.0.0/16" # Different from default range
+          vrf_lite_subnet_target_mask: 30
+          auto_unique_vrf_lite_ip_prefix: false
+          per_vrf_loopback_auto_provision: true
+          per_vrf_loopback_ip_range: "10.25.0.0/22" # Different from default range
+          per_vrf_loopback_auto_provision_ipv6: true
+          per_vrf_loopback_ipv6_range: "fd00::a25:0/112" # Different from default range
+          banner: "^ Updated via replaced state ^" # Added banner
+          day0_bootstrap: false
+          local_dhcp_server: false
+          dhcp_protocol_version: dhcpv4
+          dhcp_start_address: ""
+          dhcp_end_address: ""
+          management_gateway: ""
+          management_ipv4_prefix: 24
+          management_ipv6_prefix: 64
+  register: replaced_result_2
+  tags: [test_replaced, test_replaced_idempotent]
+
+- name: "TEST 2b: Verify replaced state is idempotent"
+  assert:
+    that:
+      - replaced_result_2 is not changed
+      - replaced_result_2 is not failed
+    fail_msg: "Replaced state is not idempotent - should not change when run twice with same config"
+    success_msg: "Replaced state is idempotent - no changes on second run"
+  tags: [test_replaced, test_replaced_idempotent]
+
+- name: "TEST 2c: Update fabric using state replaced (complete replacement)"
+  cisco.nd.nd_manage_fabric_ibgp:
+    <<: *nd_info
+    state: replaced
+    config:
+      - fabric_name: "{{ test_fabric_replaced }}"
+        category: fabric
+        location:
+          latitude: 37.7749
+          longitude: -122.4194
+        license_tier: premier
+        alert_suspend: disabled
+        security_domain: all
+        telemetry_collection: false
+        management:
+          type: vxlanIbgp
+          bgp_asn: "65004" # Changed ASN
+          site_id: "65004" # Changed site_id
+          banner: "^ Updated via replaced state ^" # Added banner
+  register: replaced_result_3
+  tags: [test_replaced, test_replaced_update]
+
+- name: "TEST 2c: Verify fabric was completely replaced"
+  assert:
+    that:
+      - replaced_result_3 is changed
+      - replaced_result_3 is not failed
+    fail_msg: "Fabric replacement with state replaced failed"
+    success_msg: "Fabric successfully replaced with state replaced"
+  tags: [test_replaced, test_replaced_update]
+
+# #############################################################################
+# # VALIDATION: 
Query test_fabric_replaced and validate expected changes +# ############################################################################# +# Get authentication token first +- name: "VALIDATION 2: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: "{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: true + status_code: + - 200 + register: nd_auth_response_2 + tags: [test_replaced, test_replaced_validation] + delegate_to: localhost + +- name: "VALIDATION 2: Query test_fabric_replaced configuration from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ test_fabric_replaced }}" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response_2.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: replaced_fabric_query + tags: [test_replaced, test_replaced_validation] + delegate_to: localhost + +- name: "VALIDATION 2: Parse fabric configuration response" + set_fact: + replaced_fabric_config: "{{ replaced_fabric_query.json }}" + tags: [test_replaced, test_replaced_validation] + +# Network Range Validations +# +# Category 1: Properties explicitly specified in TEST 2c replace task +# +- name: "VALIDATION 2a: Verify explicitly specified properties in replace" + assert: + that: + - replaced_fabric_config.management.bgpAsn == "65004" + - replaced_fabric_config.management.siteId == "65004" + - replaced_fabric_config.management.banner == "^ Updated via replaced state ^" + fail_msg: >- + Explicitly specified properties validation failed. 
+ bgpAsn: {{ replaced_fabric_config.management.bgpAsn }} (expected 65004), + siteId: {{ replaced_fabric_config.management.siteId }} (expected 65004), + banner: '{{ replaced_fabric_config.management.banner }}' (expected '^ Updated via replaced state ^') + success_msg: "✓ All 3 explicitly specified properties set correctly (bgpAsn, siteId, banner)" + tags: [test_replaced, test_replaced_validation] + +# +# Category 2: Properties NOT specified in TEST 2c - MUST revert to Pydantic defaults +# This is the critical replaced behavior: complete replacement means unspecified +# properties get their model default values, NOT the values from TEST 2a. +# +- name: "VALIDATION 2b: Verify core/overlay defaults after replace (was changed in 2a)" + assert: + that: + # These were set to non-default values in TEST 2a and must now revert + - replaced_fabric_config.management.anycastGatewayMac == "2020.0000.00aa" # was 2020.0000.00dd + - replaced_fabric_config.management.performanceMonitoring == false # was true + - replaced_fabric_config.management.targetSubnetMask == 30 + - replaced_fabric_config.management.fabricMtu == 9216 # was 9000 + - replaced_fabric_config.management.l2HostInterfaceMtu == 9216 # was 9000 + - replaced_fabric_config.management.overlayMode == "cli" + - replaced_fabric_config.management.underlayIpv6 == false + - replaced_fabric_config.management.fabricInterfaceType == "p2p" + - replaced_fabric_config.management.linkStateRoutingProtocol == "ospf" + - replaced_fabric_config.management.ospfAreaId == "0.0.0.0" + - replaced_fabric_config.management.staticUnderlayIpAllocation == false + fail_msg: >- + Core/overlay defaults validation failed after replace. Properties not + specified in the replace task must revert to Pydantic model defaults. 
+ success_msg: "✓ All 11 core/overlay properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify multicast/replication defaults after replace" + assert: + that: + - replaced_fabric_config.management.replicationMode == "multicast" + - replaced_fabric_config.management.multicastGroupSubnet == "239.1.1.0/25" # was 239.1.3.0/25 + - replaced_fabric_config.management.autoGenerateMulticastGroupAddress == false + - replaced_fabric_config.management.underlayMulticastGroupAddressLimit == 128 + - replaced_fabric_config.management.tenantRoutedMulticast == false + - replaced_fabric_config.management.tenantRoutedMulticastIpv6 == false + - replaced_fabric_config.management.rendezvousPointCount == 2 # was 4 + - replaced_fabric_config.management.rendezvousPointLoopbackId == 254 # was 253 + - replaced_fabric_config.management.rendezvousPointMode == "asm" + - replaced_fabric_config.management.pimHelloAuthentication == false + fail_msg: >- + Multicast/replication defaults validation failed after replace. 
+ success_msg: "✓ All 10 multicast/replication properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify vPC defaults after replace (were changed in 2a)" + assert: + that: + - replaced_fabric_config.management.vpcPeerLinkVlan == "3600" # was 3700 + - replaced_fabric_config.management.vpcPeerLinkEnableNativeVlan == false + - replaced_fabric_config.management.vpcPeerKeepAliveOption == "management" # default is management, 2a used loopback + - replaced_fabric_config.management.vpcAutoRecoveryTimer == 360 # was 300 + - replaced_fabric_config.management.vpcDelayRestoreTimer == 150 # was 120 + - replaced_fabric_config.management.vpcPeerLinkPortChannelId == "500" # was 600 + - replaced_fabric_config.management.vpcDomainIdRange == "1-1000" # was 1-800 + - replaced_fabric_config.management.vpcIpv6NeighborDiscoverySync == true # was false in 2a + - replaced_fabric_config.management.vpcLayer3PeerRouter == true + - replaced_fabric_config.management.vpcTorDelayRestoreTimer == 30 + - replaced_fabric_config.management.fabricVpcDomainId == false + - replaced_fabric_config.management.fabricVpcQos == false + - replaced_fabric_config.management.enablePeerSwitch == false + fail_msg: >- + vPC defaults validation failed after replace. 
+ success_msg: "✓ All 13 vPC properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify loopback/template/routing defaults after replace" + assert: + that: + - replaced_fabric_config.management.bgpLoopbackId == 0 + - replaced_fabric_config.management.nveLoopbackId == 1 + - replaced_fabric_config.management.routeReflectorCount == 2 + - replaced_fabric_config.management.vrfTemplate == "Default_VRF_Universal" + - replaced_fabric_config.management.networkTemplate == "Default_Network_Universal" + - replaced_fabric_config.management.vrfExtensionTemplate == "Default_VRF_Extension_Universal" + - replaced_fabric_config.management.networkExtensionTemplate == "Default_Network_Extension_Universal" + - replaced_fabric_config.management.autoBgpNeighborDescription == true + - replaced_fabric_config.management.linkStateRoutingTag == "UNDERLAY" + fail_msg: >- + Loopback/template/routing defaults validation failed after replace. + success_msg: "✓ All 9 loopback/template/routing properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify IP range defaults after replace (were changed in 2a)" + assert: + that: + - replaced_fabric_config.management.bgpLoopbackIpRange == "10.2.0.0/22" # was 10.22.0.0/22 + - replaced_fabric_config.management.nveLoopbackIpRange == "10.3.0.0/22" # was 10.23.0.0/22 + - replaced_fabric_config.management.anycastRendezvousPointIpRange == "10.254.254.0/24" # was 10.254.252.0/24 + - replaced_fabric_config.management.intraFabricSubnetRange == "10.4.0.0/16" # was 10.24.0.0/16 + fail_msg: >- + IP range defaults validation failed after replace. 
+ success_msg: "✓ All 4 IP range properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify VNI/VLAN range defaults after replace (were changed in 2a)" + assert: + that: + - replaced_fabric_config.management.l2VniRange == "30000-49000" # was 40000-59000 + - replaced_fabric_config.management.l3VniRange == "50000-59000" # was 60000-69000 + - replaced_fabric_config.management.networkVlanRange == "2300-2999" # was 2400-3099 + - replaced_fabric_config.management.vrfVlanRange == "2000-2299" # was 2100-2399 + - replaced_fabric_config.management.subInterfaceDot1qRange == "2-511" + - replaced_fabric_config.management.l3VniNoVlanDefaultOption == false + fail_msg: >- + VNI/VLAN range defaults validation failed after replace. + success_msg: "✓ All 6 VNI/VLAN range properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify VRF Lite defaults after replace (were changed in 2a)" + assert: + that: + - replaced_fabric_config.management.vrfLiteAutoConfig == "manual" + - replaced_fabric_config.management.vrfLiteSubnetRange == "10.33.0.0/16" # was 10.53.0.0/16 + - replaced_fabric_config.management.vrfLiteSubnetTargetMask == 30 + - replaced_fabric_config.management.autoUniqueVrfLiteIpPrefix == false + fail_msg: >- + VRF Lite defaults validation failed after replace. + success_msg: "✓ All 4 VRF Lite properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify per-VRF loopback defaults after replace (were changed in 2a)" + assert: + that: + - replaced_fabric_config.management.perVrfLoopbackAutoProvision == false # was true in 2a + - replaced_fabric_config.management.perVrfLoopbackAutoProvisionIpv6 == false # was true in 2a + fail_msg: >- + Per-VRF loopback defaults validation failed after replace. 
+ success_msg: "✓ All 2 per-VRF loopback properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify NX-API/system defaults after replace (were changed in 2a)" + assert: + that: + - replaced_fabric_config.management.nxapi == false # default is false + - replaced_fabric_config.management.nxapiHttp == true # NDFC API default is true (overrides Pydantic default of false) + - replaced_fabric_config.management.nxapiHttpsPort == 443 + - replaced_fabric_config.management.nxapiHttpPort == 80 + - replaced_fabric_config.management.tenantDhcp == true + - replaced_fabric_config.management.snmpTrap == true # was false in 2a + - replaced_fabric_config.management.cdp == false + - replaced_fabric_config.management.tcamAllocation == true # was false in 2a + - replaced_fabric_config.management.realTimeInterfaceStatisticsCollection == false # was true in 2a + fail_msg: >- + NX-API/system defaults validation failed after replace. + success_msg: "✓ All 9 NX-API/system properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify advertising/pip defaults after replace (were changed in 2a)" + assert: + that: + - replaced_fabric_config.management.advertisePhysicalIp == false # was true in 2a + - replaced_fabric_config.management.advertisePhysicalIpOnBorder == true + - replaced_fabric_config.management.anycastBorderGatewayAdvertisePhysicalIp == false # was true in 2a + fail_msg: >- + Advertising/PIP defaults validation failed after replace. + success_msg: "✓ All 3 advertising/PIP properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify greenfield/debug defaults after replace" + assert: + that: + - replaced_fabric_config.management.greenfieldDebugFlag == "disable" # default is disable + fail_msg: >- + Greenfield debug flag validation failed after replace. 
+ success_msg: "✓ Greenfield debug flag correctly set to default" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify protocol auth defaults after replace" + assert: + that: + - replaced_fabric_config.management.bgpAuthentication == false + - replaced_fabric_config.management.ospfAuthentication == false + - replaced_fabric_config.management.bfd == false + - replaced_fabric_config.management.macsec == false + - replaced_fabric_config.management.vrfLiteMacsec == false + fail_msg: >- + Protocol authentication defaults validation failed after replace. + success_msg: "✓ All 5 protocol authentication properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify system policy defaults after replace" + assert: + that: + - replaced_fabric_config.management.securityGroupTag == false + - replaced_fabric_config.management.privateVlan == false + - replaced_fabric_config.management.defaultQueuingPolicy == false + - replaced_fabric_config.management.aimlQos == false + - replaced_fabric_config.management.dlb == false + - replaced_fabric_config.management.aiLoadSharing == false + - replaced_fabric_config.management.ptp == false + - replaced_fabric_config.management.stpRootOption == "unmanaged" + - replaced_fabric_config.management.mplsHandoff == false + - replaced_fabric_config.management.allowVlanOnLeafTorPairing == "none" + - replaced_fabric_config.management.leafTorIdRange == false + fail_msg: >- + System policy defaults validation failed after replace. 
+ success_msg: "✓ All 11 system policy properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify OAM/compliance/advanced defaults after replace" + assert: + that: + - replaced_fabric_config.management.nveHoldDownTimer == 180 + - replaced_fabric_config.management.nextGenerationOAM == true + - replaced_fabric_config.management.strictConfigComplianceMode == false + - replaced_fabric_config.management.advancedSshOption == false + - replaced_fabric_config.management.coppPolicy == "strict" + - replaced_fabric_config.management.powerRedundancyMode == "redundant" + - replaced_fabric_config.management.hostInterfaceAdminState == true + - replaced_fabric_config.management.policyBasedRouting == false + - replaced_fabric_config.management.inbandManagement == false + fail_msg: >- + OAM/compliance/advanced defaults validation failed after replace. + success_msg: "✓ All 9 OAM/compliance/advanced properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify resource ID range defaults after replace" + assert: + that: + - replaced_fabric_config.management.ipServiceLevelAgreementIdRange == "10000-19999" + - replaced_fabric_config.management.objectTrackingNumberRange == "100-299" + - replaced_fabric_config.management.serviceNetworkVlanRange == "3000-3199" + - replaced_fabric_config.management.routeMapSequenceNumberRange == "1-65534" + fail_msg: >- + Resource ID range defaults validation failed after replace. 
+ success_msg: "✓ All 4 resource ID range properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify backup/brownfield defaults after replace" + assert: + that: + - replaced_fabric_config.management.realTimeBackup == false + - replaced_fabric_config.management.scheduledBackup == false + - replaced_fabric_config.management.brownfieldNetworkNameFormat == "Auto_Net_VNI$$VNI$$_VLAN$$VLAN_ID$$" + - replaced_fabric_config.management.brownfieldSkipOverlayNetworkAttachments == false + - replaced_fabric_config.management.allowSmartSwitchOnboarding == false + fail_msg: >- + Backup/brownfield defaults validation failed after replace. + success_msg: "✓ All 5 backup/brownfield properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2b: Verify bootstrap/DHCP defaults after replace" + assert: + that: + - replaced_fabric_config.management.day0Bootstrap == false + - replaced_fabric_config.management.localDhcpServer == false + fail_msg: >- + Bootstrap/DHCP defaults validation failed after replace. 
+ success_msg: "✓ All 2 bootstrap/DHCP properties correctly reverted to defaults" + tags: [test_replaced, test_replaced_validation] + +- name: "VALIDATION 2: Display comprehensive validation summary for replaced" + debug: + msg: | + ============================================================ + COMPREHENSIVE VALIDATION SUMMARY for test_fabric_replaced + After TEST 2c: Replace with only bgpAsn, siteId, and banner + ============================================================ + Category 1 - Explicitly specified (3 fields): + ✓ bgpAsn: {{ replaced_fabric_config.management.bgpAsn }} + ✓ siteId: {{ replaced_fabric_config.management.siteId }} + ✓ banner: "{{ replaced_fabric_config.management.banner }}" + + Category 2 - Reverted to Pydantic defaults (108 fields): + ✓ Core/overlay config (11 fields) + ✓ Multicast/replication (10 fields) + ✓ vPC settings (13 fields) + ✓ Loopback/template/routing (9 fields) + ✓ IP ranges (4 fields) + ✓ VNI/VLAN ranges (6 fields) + ✓ VRF Lite (4 fields) + ✓ Per-VRF loopback (2 fields) + ✓ NX-API/system (9 fields) + ✓ Advertising/PIP (3 fields) + ✓ Greenfield debug (1 field) + ✓ Protocol auth (5 fields) + ✓ System policies (11 fields) + ✓ OAM/compliance/advanced (9 fields) + ✓ Resource ID ranges (4 fields) + ✓ Backup/brownfield (5 fields) + ✓ Bootstrap/DHCP (2 fields) + + Total: 111 properties validated! 
+      Key replaced behavior verified:
+      - Properties from TEST 2a that were NOT in TEST 2c are reset to defaults
+      - anycastGatewayMac: 2020.0000.00dd → 2020.0000.00aa (default)
+      - multicastGroupSubnet: 239.1.3.0/25 → 239.1.1.0/25 (default)
+      - fabricMtu: 9000 → 9216 (default)
+      - vpcAutoRecoveryTimer: 300 → 360 (default)
+      - advertisePhysicalIp: true → false (default)
+      - nxapiHttp: remains true (NDFC API default is true, overriding the Pydantic default of false)
+      - perVrfLoopbackAutoProvision: true → false (default)
+      ============================================================
+  tags: [test_replaced, test_replaced_validation]
+
+#############################################################################
+# TEST 3: STATE DELETED - Delete fabrics (uses test_fabric_replaced from TEST 2)
+#############################################################################
+- name: "TEST 3a: Delete fabric using state deleted"
+  cisco.nd.nd_manage_fabric_ibgp:
+    <<: *nd_info
+    state: deleted
+    config:
+      - fabric_name: "{{ test_fabric_replaced }}"
+  register: deleted_result_1
+  tags: [test_deleted, test_deleted_delete]
+
+- name: "TEST 3a: Verify fabric was deleted"
+  assert:
+    that:
+      - deleted_result_1 is changed
+      - deleted_result_1 is not failed
+    fail_msg: "Fabric deletion with state deleted failed"
+    success_msg: "Fabric successfully deleted with state deleted"
+  tags: [test_deleted, test_deleted_delete]
+
+- name: "TEST 3b: Delete fabric using state deleted (second run - idempotency test)"
+  cisco.nd.nd_manage_fabric_ibgp:
+    <<: *nd_info
+    state: deleted
+    config:
+      - fabric_name: "{{ test_fabric_replaced }}"
+  register: deleted_result_2
+  tags: [test_deleted, test_deleted_idempotent]
+
+- name: "TEST 3b: Verify deleted state is idempotent"
+  assert:
+    that:
+      - deleted_result_2 is not changed
+      - deleted_result_2 is not failed
+    fail_msg: "Deleted state is not idempotent - should not change when deleting non-existent fabric"
+    success_msg: "Deleted state is idempotent - no changes when deleting non-existent fabric"
+  tags:
[test_deleted, test_deleted_idempotent] + +############################################################################# +# TEST 4: Multiple fabric operations in single task +############################################################################# +- name: "TEST 4: Multiple fabric operations in single task" + cisco.nd.nd_manage_fabric_ibgp: + <<: *nd_info + state: merged + config: + - fabric_name: "multi_fabric_1" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65101" + site_id: "65101" + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0001" + performance_monitoring: false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: loopback + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + # vpc_ipv6_neighbor_discovery_sync: true + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: true + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: enable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 
10 + bgp_loopback_ip_range: "10.101.0.0/22" + nve_loopback_ip_range: "10.103.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.101.0/24" + intra_fabric_subnet_range: "10.104.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.133.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.105.0.0/22" + # per_vrf_loopback_auto_provision_ipv6: false + # per_vrf_loopback_ipv6_range: "fd00::a105:0/112" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + # management_ipv6_prefix: 64 + - fabric_name: "multi_fabric_2" + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65102" + site_id: "65102" + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.0002" + performance_monitoring: false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: loopback + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + # vpc_ipv6_neighbor_discovery_sync: true + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + 
vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: true + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: enable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + bgp_loopback_ip_range: "10.102.0.0/22" + nve_loopback_ip_range: "10.103.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.102.0/24" + intra_fabric_subnet_range: "10.104.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.134.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.106.0.0/22" + # per_vrf_loopback_auto_provision_ipv6: false + # per_vrf_loopback_ipv6_range: "fd00::a106:0/112" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + # management_ipv6_prefix: 64 + register: multi_fabric_result + tags: [test_multi, test_multi_create] + +- name: "TEST 4: Verify multiple fabrics were created" + assert: + that: + - multi_fabric_result is changed + - multi_fabric_result is not failed + fail_msg: "Multiple fabric creation failed" + success_msg: "Multiple fabrics successfully created" + tags: [test_multi, test_multi_create] + +############################################################################# +# FINAL CLEANUP - Clean up all test fabrics +############################################################################# 
+- name: "CLEANUP: Delete all test fabrics" + cisco.nd.nd_manage_fabric_ibgp: + <<: *nd_info + state: deleted + config: + - fabric_name: "{{ test_fabric_merged }}" + - fabric_name: "{{ test_fabric_replaced }}" + - fabric_name: "multi_fabric_1" + - fabric_name: "multi_fabric_2" + ignore_errors: true + tags: [cleanup, always] + +############################################################################# +# TEST SUMMARY +############################################################################# +- name: "TEST SUMMARY: Display test results" + debug: + msg: | + ======================================================== + TEST SUMMARY for cisco.nd.nd_manage_fabric_ibgp module: + ======================================================== + ✓ TEST 1: STATE MERGED + - Create fabric: {{ 'PASSED' if merged_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if merged_result_2 is not changed else 'FAILED' }} + - Update fabric: {{ 'PASSED' if merged_result_3 is changed else 'FAILED' }} + + ✓ TEST 2: STATE REPLACED + - Create fabric: {{ 'PASSED' if replaced_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if replaced_result_2 is not changed else 'FAILED' }} + - Replace fabric: {{ 'PASSED' if replaced_result_3 is changed else 'FAILED' }} + + ✓ TEST 3: STATE DELETED + - Delete fabric: {{ 'PASSED' if deleted_result_1 is changed else 'FAILED' }} + - Idempotency: {{ 'PASSED' if deleted_result_2 is not changed else 'FAILED' }} + + ✓ TEST 4: MULTIPLE FABRICS + - Multi-create: {{ 'PASSED' if multi_fabric_result is changed else 'FAILED' }} + + All tests validate: + - State merged: Creates and updates fabrics by merging changes + - State replaced: Creates and completely replaces fabric configuration + - State deleted: Removes fabrics + - Idempotency: All operations are idempotent when run multiple times + ======================================== + tags: [summary, always] \ No newline at end of file diff --git 
a/tests/integration/targets/nd_manage_fabric/tasks/main.yaml b/tests/integration/targets/nd_manage_fabric/tasks/main.yaml new file mode 100644 index 00000000..eacc3be3 --- /dev/null +++ b/tests/integration/targets/nd_manage_fabric/tasks/main.yaml @@ -0,0 +1,9 @@ +--- +- name: Run nd_manage_fabric iBGP tests + ansible.builtin.include_tasks: fabric_ibgp.yaml + +- name: Run nd_manage_fabric eBGP tests + ansible.builtin.include_tasks: fabric_ebgp.yaml + +- name: Run nd_manage_fabric External Connectivity tests + ansible.builtin.include_tasks: fabric_external.yaml diff --git a/tests/integration/targets/nd_manage_fabric/vars/main.yaml b/tests/integration/targets/nd_manage_fabric/vars/main.yaml new file mode 100644 index 00000000..893b17bb --- /dev/null +++ b/tests/integration/targets/nd_manage_fabric/vars/main.yaml @@ -0,0 +1,328 @@ +--- + +test_fabric_merged: "ibgp_test_fabric_merged" +test_fabric_replaced: "ibgp_test_fabric_replaced" +test_fabric_deleted: "ibgp_test_fabric_deleted" + +ebgp_test_fabric_merged: "ebgp_test_fabric_merged" +ebgp_test_fabric_replaced: "ebgp_test_fabric_replaced" +ebgp_test_fabric_deleted: "ebgp_test_fabric_deleted" + +ext_test_fabric_merged: "ext_test_fabric_merged" +ext_test_fabric_replaced: "ext_test_fabric_replaced" +ext_test_fabric_deleted: "ext_test_fabric_deleted" + +# Common fabric configuration for all tests +# common_fabric_config: +fabric_config_ibgp: + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanIbgp + bgp_asn: "65001.55" + site_id: "65001" + overlay_mode: cli + underlay_ipv6: false + fabric_interface_type: p2p + link_state_routing_protocol: ospf + ospf_area_id: "0.0.0.0" + route_reflector_count: 4 + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00aa" + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + performance_monitoring: false + static_underlay_ip_allocation: 
false + + # Replication / Multicast + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + tenant_routed_multicast_ipv6: false + rendezvous_point_count: 2 + rendezvous_point_mode: asm + rendezvous_point_loopback_id: 254 + pim_hello_authentication: false + + # vPC + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: loopback + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + vpc_ipv6_neighbor_discovery_sync: true + vpc_layer3_peer_router: true + vpc_tor_delay_restore_timer: 30 + fabric_vpc_domain_id: false + fabric_vpc_qos: false + enable_peer_switch: false + + # PIP / Advertising + advertise_physical_ip: false + advertise_physical_ip_on_border: true + anycast_border_gateway_advertise_physical_ip: false + + # Domain / Loopback IDs + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + + # Templates + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + + # Protocol Authentication + bgp_authentication: false + ospf_authentication: false + bfd: false + macsec: false + vrf_lite_macsec: false + + # BGP / Routing Enhancements + auto_bgp_neighbor_description: true + ibgp_peer_template: "" + leaf_ibgp_peer_template: "" + link_state_routing_tag: "UNDERLAY" + + # Resource ID Ranges + l3_vni_no_vlan_default_option: false + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + ip_service_level_agreement_id_range: "10000-19999" + object_tracking_number_range: "100-299" + service_network_vlan_range: "3000-3199" + route_map_sequence_number_range: "1-65534" + + # 
IP Ranges + bgp_loopback_ip_range: "10.2.0.0/22" + nve_loopback_ip_range: "10.3.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.254.0/24" + intra_fabric_subnet_range: "10.4.0.0/16" + + # VRF Lite / DCI + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.33.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + + # Per-VRF Loopback + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.5.0.0/22" + per_vrf_loopback_auto_provision_ipv6: false + + # Management / System + tenant_dhcp: true + nxapi: true + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + cdp: false + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + tcam_allocation: true + inband_management: false + + # Security + security_group_tag: false + private_vlan: false + + # QoS / Queuing + default_queuing_policy: false + aiml_qos: false + + # DLB / AI + dlb: false + ai_load_sharing: false + + # PTP / STP / MPLS + ptp: false + stp_root_option: unmanaged + mpls_handoff: false + + # Leaf / TOR + allow_vlan_on_leaf_tor_pairing: none + leaf_tor_id_range: false + + # OAM / Compliance + nve_hold_down_timer: 180 + next_generation_oam: true + strict_config_compliance_mode: false + greenfield_debug_flag: enable + + # System Policies + advanced_ssh_option: false + copp_policy: strict + power_redundancy_mode: redundant + host_interface_admin_state: true + policy_based_routing: false + + # Freeform Config + extra_config_leaf: "" + extra_config_spine: "" + extra_config_tor: "" + extra_config_intra_fabric_links: "" + extra_config_aaa: "" + pre_interface_config_leaf: "" + pre_interface_config_spine: "" + pre_interface_config_tor: "" + + # Banner + banner: | + @ADVISORY This is a test fabric deployed by Ansible for validation purposes. Do not make changes to this fabric outside of Ansible or use it for production traffic. 
ADVISORY@ + + # Backup + real_time_backup: false + scheduled_backup: false + + # Brownfield + brownfield_network_name_format: "Auto_Net_VNI$$VNI$$_VLAN$$VLAN_ID$$" + brownfield_skip_overlay_network_attachments: false + + # Hypershield + allow_smart_switch_onboarding: false + + # Bootstrap / DHCP + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + +# Common External Connectivity fabric configuration for all External tests +# common_external_fabric_config: +fabric_config_external: + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: externalConnectivity + bgp_asn: "65001" + copp_policy: manual + create_bgp_config: true + cdp: false + snmp_trap: true + nxapi: false + nxapi_http: false + nxapi_https_port: 443 + nxapi_http_port: 80 + performance_monitoring: false + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + sub_interface_dot1q_range: "2-511" + power_redundancy_mode: redundant + ptp: false + ptp_domain_id: 0 + ptp_loopback_id: 0 + mpls_handoff: false + mpls_loopback_ip_range: "10.102.0.0/25" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 + +# Common eBGP fabric configuration for all eBGP tests +# common_ebgp_fabric_config: +fabric_config_ebgp: + category: fabric + location: + latitude: 37.7749 + longitude: -122.4194 + license_tier: premier + alert_suspend: disabled + security_domain: all + telemetry_collection: false + management: + type: vxlanEbgp + bgp_asn: "65001" + bgp_asn_auto_allocation: false + site_id: "65001" + bgp_as_mode: multiAS + bgp_allow_as_in_num: 1 + bgp_max_path: 4 + 
auto_configure_ebgp_evpn_peering: true + target_subnet_mask: 30 + anycast_gateway_mac: "2020.0000.00aa" + performance_monitoring: false + replication_mode: multicast + multicast_group_subnet: "239.1.1.0/25" + auto_generate_multicast_group_address: false + underlay_multicast_group_address_limit: 128 + tenant_routed_multicast: false + rendezvous_point_count: 2 + rendezvous_point_loopback_id: 254 + vpc_peer_link_vlan: "3600" + vpc_peer_link_enable_native_vlan: false + vpc_peer_keep_alive_option: management + vpc_auto_recovery_timer: 360 + vpc_delay_restore_timer: 150 + vpc_peer_link_port_channel_id: "500" + advertise_physical_ip: false + vpc_domain_id_range: "1-1000" + bgp_loopback_id: 0 + nve_loopback_id: 1 + vrf_template: Default_VRF_Universal + network_template: Default_Network_Universal + vrf_extension_template: Default_VRF_Extension_Universal + network_extension_template: Default_Network_Extension_Universal + l3_vni_no_vlan_default_option: false + fabric_mtu: 9216 + l2_host_interface_mtu: 9216 + tenant_dhcp: true + nxapi: false + nxapi_https_port: 443 + nxapi_http: false + nxapi_http_port: 80 + snmp_trap: true + anycast_border_gateway_advertise_physical_ip: false + greenfield_debug_flag: disable + tcam_allocation: true + real_time_interface_statistics_collection: false + interface_statistics_load_interval: 10 + bgp_loopback_ip_range: "10.2.0.0/22" + nve_loopback_ip_range: "10.3.0.0/22" + anycast_rendezvous_point_ip_range: "10.254.254.0/24" + intra_fabric_subnet_range: "10.4.0.0/16" + l2_vni_range: "30000-49000" + l3_vni_range: "50000-59000" + network_vlan_range: "2300-2999" + vrf_vlan_range: "2000-2299" + sub_interface_dot1q_range: "2-511" + vrf_lite_auto_config: manual + vrf_lite_subnet_range: "10.33.0.0/16" + vrf_lite_subnet_target_mask: 30 + auto_unique_vrf_lite_ip_prefix: false + per_vrf_loopback_auto_provision: true + per_vrf_loopback_ip_range: "10.5.0.0/22" + banner: "" + day0_bootstrap: false + local_dhcp_server: false + dhcp_protocol_version: dhcpv4 + 
dhcp_start_address: "" + dhcp_end_address: "" + management_gateway: "" + management_ipv4_prefix: 24 diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py new file mode 100644 index 00000000..cb1b17d4 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py @@ -0,0 +1,736 @@ +# Copyright: (c) 2026, Mike Wiebe (@mwiebe) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics.py + +Tests the ND Manage Fabrics endpoint classes +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricsDelete, + EpManageFabricsGet, + EpManageFabricsListGet, + EpManageFabricsPost, + EpManageFabricsPut, + EpManageFabricsSummaryGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpManageFabricsGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00010(): + """ + # Summary + + Verify EpManageFabricsGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsGet.__init__() + - EpManageFabricsGet.verb + - EpManageFabricsGet.class_name + """ + with does_not_raise(): + instance = EpManageFabricsGet() + assert instance.class_name == "EpApiV1ManageFabricsGet" + assert instance.verb == 
HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_00020(): + """ + # Summary + + Verify EpManageFabricsGet path with fabric_name + + ## Test + + - path returns "/api/v1/manage/fabrics/my-fabric" when fabric_name is set + + ## Classes and Methods + + - EpManageFabricsGet.path + - EpManageFabricsGet.fabric_name + """ + with does_not_raise(): + instance = EpManageFabricsGet() + instance.fabric_name = "my-fabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/my-fabric" + + +def test_endpoints_api_v1_manage_fabrics_00030(): + """ + # Summary + + Verify EpManageFabricsGet path without fabric_name raises ValueError + + ## Test + + - Accessing path without setting fabric_name raises ValueError + + ## Classes and Methods + + - EpManageFabricsGet.path + """ + with pytest.raises(ValueError): + instance = EpManageFabricsGet() + result = instance.path # noqa: F841 + + +def test_endpoints_api_v1_manage_fabrics_00040(): + """ + # Summary + + Verify EpManageFabricsGet path with fabric_name and cluster_name query param + + ## Test + + - path includes clusterName query parameter when set + + ## Classes and Methods + + - EpManageFabricsGet.path + - EpManageFabricsGet.endpoint_params + """ + with does_not_raise(): + instance = EpManageFabricsGet() + instance.fabric_name = "my-fabric" + instance.endpoint_params.cluster_name = "cluster1" + result = instance.path + assert result == "/api/v1/manage/fabrics/my-fabric?clusterName=cluster1" + + +# ============================================================================= +# Test: EpManageFabricsListGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00100(): + """ + # Summary + + Verify EpManageFabricsListGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsListGet.__init__() + - EpManageFabricsListGet.verb + - 
EpManageFabricsListGet.class_name + """ + with does_not_raise(): + instance = EpManageFabricsListGet() + assert instance.class_name == "EpApiV1ManageFabricsListGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_00110(): + """ + # Summary + + Verify EpManageFabricsListGet path without fabric_name + + ## Test + + - path returns "/api/v1/manage/fabrics" when fabric_name is not set + (no error since _require_fabric_name is False) + + ## Classes and Methods + + - EpManageFabricsListGet.path + """ + with does_not_raise(): + instance = EpManageFabricsListGet() + result = instance.path + assert result == "/api/v1/manage/fabrics" + + +def test_endpoints_api_v1_manage_fabrics_00120(): + """ + # Summary + + Verify EpManageFabricsListGet path with category and max query params + + ## Test + + - path includes category and max query parameters when set + + ## Classes and Methods + + - EpManageFabricsListGet.path + - EpManageFabricsListGet.endpoint_params + """ + with does_not_raise(): + instance = EpManageFabricsListGet() + instance.endpoint_params.category = "fabric" + instance.endpoint_params.max = 10 + result = instance.path + assert "category=fabric" in result + assert "max=10" in result + assert result.startswith("/api/v1/manage/fabrics?") + + +def test_endpoints_api_v1_manage_fabrics_00130(): + """ + # Summary + + Verify EpManageFabricsListGet path with all query params + + ## Test + + - path includes all query parameters when set + (cluster_name, category, filter, max, offset, sort) + + ## Classes and Methods + + - EpManageFabricsListGet.path + - EpManageFabricsListGet.endpoint_params + """ + with does_not_raise(): + instance = EpManageFabricsListGet() + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.category = "fabric" + instance.endpoint_params.filter = "name:test" + instance.endpoint_params.max = 25 + instance.endpoint_params.offset = 5 + instance.endpoint_params.sort = "name:desc" + result = 
instance.path + assert "clusterName=cluster1" in result + assert "category=fabric" in result + assert "max=25" in result + assert "offset=5" in result + assert "sort=name%3Adesc" in result or "sort=name:desc" in result + assert result.startswith("/api/v1/manage/fabrics?") + + +def test_endpoints_api_v1_manage_fabrics_00140(): + """ + # Summary + + Verify EpManageFabricsListGet set_identifiers with None + + ## Test + + - set_identifiers(None) leaves fabric_name as None and path still works + + ## Classes and Methods + + - EpManageFabricsListGet.set_identifiers + - EpManageFabricsListGet.path + """ + with does_not_raise(): + instance = EpManageFabricsListGet() + instance.set_identifiers(None) + result = instance.path + assert instance.fabric_name is None + assert result == "/api/v1/manage/fabrics" + + +def test_endpoints_api_v1_manage_fabrics_00150(): + """ + # Summary + + Verify Pydantic validation rejects max < 1 + + ## Test + + - Setting max to 0 raises ValueError (ge=1 constraint) + + ## Classes and Methods + + - FabricsListEndpointParams.max + """ + with pytest.raises(ValueError): + instance = EpManageFabricsListGet() + instance.endpoint_params = type(instance.endpoint_params)(max=0) + + +# ============================================================================= +# Test: EpManageFabricsPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00200(): + """ + # Summary + + Verify EpManageFabricsPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsPost.__init__() + - EpManageFabricsPost.verb + - EpManageFabricsPost.class_name + """ + with does_not_raise(): + instance = EpManageFabricsPost() + assert instance.class_name == "EpApiV1ManageFabricsPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_00210(): + """ + # Summary + + Verify 
EpManageFabricsPost path without fabric_name + + ## Test + + - path returns "/api/v1/manage/fabrics" when fabric_name is not set + (no error since _require_fabric_name is False) + + ## Classes and Methods + + - EpManageFabricsPost.path + """ + with does_not_raise(): + instance = EpManageFabricsPost() + result = instance.path + assert result == "/api/v1/manage/fabrics" + + +def test_endpoints_api_v1_manage_fabrics_00220(): + """ + # Summary + + Verify EpManageFabricsPost path with cluster_name query param + + ## Test + + - path includes clusterName query parameter when set + + ## Classes and Methods + + - EpManageFabricsPost.path + - EpManageFabricsPost.endpoint_params + """ + with does_not_raise(): + instance = EpManageFabricsPost() + instance.endpoint_params.cluster_name = "cluster1" + result = instance.path + assert result == "/api/v1/manage/fabrics?clusterName=cluster1" + + +def test_endpoints_api_v1_manage_fabrics_00230(): + """ + # Summary + + Verify EpManageFabricsPost set_identifiers sets fabric_name + + ## Test + + - set_identifiers sets fabric_name (POST doesn't require it but allows it) + + ## Classes and Methods + + - EpManageFabricsPost.set_identifiers + """ + with does_not_raise(): + instance = EpManageFabricsPost() + instance.set_identifiers("test-fabric") + assert instance.fabric_name == "test-fabric" + + +# ============================================================================= +# Test: EpManageFabricsPut +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00300(): + """ + # Summary + + Verify EpManageFabricsPut basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is PUT + + ## Classes and Methods + + - EpManageFabricsPut.__init__() + - EpManageFabricsPut.verb + - EpManageFabricsPut.class_name + """ + with does_not_raise(): + instance = EpManageFabricsPut() + assert instance.class_name == "EpApiV1ManageFabricsPut" + 
assert instance.verb == HttpVerbEnum.PUT + + +def test_endpoints_api_v1_manage_fabrics_00310(): + """ + # Summary + + Verify EpManageFabricsPut path with fabric_name + + ## Test + + - path returns "/api/v1/manage/fabrics/my-fabric" when fabric_name is set + + ## Classes and Methods + + - EpManageFabricsPut.path + - EpManageFabricsPut.fabric_name + """ + with does_not_raise(): + instance = EpManageFabricsPut() + instance.fabric_name = "my-fabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/my-fabric" + + +def test_endpoints_api_v1_manage_fabrics_00320(): + """ + # Summary + + Verify EpManageFabricsPut path without fabric_name raises ValueError + + ## Test + + - Accessing path without setting fabric_name raises ValueError + + ## Classes and Methods + + - EpManageFabricsPut.path + """ + with pytest.raises(ValueError): + instance = EpManageFabricsPut() + result = instance.path # noqa: F841 + + +def test_endpoints_api_v1_manage_fabrics_00340(): + """ + # Summary + + Verify EpManageFabricsPut path with fabric_name and cluster_name query param + + ## Test + + - path includes clusterName query parameter when set + + ## Classes and Methods + + - EpManageFabricsPut.path + - EpManageFabricsPut.endpoint_params + """ + with does_not_raise(): + instance = EpManageFabricsPut() + instance.fabric_name = "my-fabric" + instance.endpoint_params.cluster_name = "cluster1" + result = instance.path + assert result == "/api/v1/manage/fabrics/my-fabric?clusterName=cluster1" + + +# ============================================================================= +# Test: EpManageFabricsDelete +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00400(): + """ + # Summary + + Verify EpManageFabricsDelete basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is DELETE + + ## Classes and Methods + + - EpManageFabricsDelete.__init__() + - 
EpManageFabricsDelete.verb + - EpManageFabricsDelete.class_name + """ + with does_not_raise(): + instance = EpManageFabricsDelete() + assert instance.class_name == "EpApiV1ManageFabricsDelete" + assert instance.verb == HttpVerbEnum.DELETE + + +def test_endpoints_api_v1_manage_fabrics_00410(): + """ + # Summary + + Verify EpManageFabricsDelete path with fabric_name + + ## Test + + - path returns "/api/v1/manage/fabrics/my-fabric" when fabric_name is set + + ## Classes and Methods + + - EpManageFabricsDelete.path + - EpManageFabricsDelete.fabric_name + """ + with does_not_raise(): + instance = EpManageFabricsDelete() + instance.fabric_name = "my-fabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/my-fabric" + + +def test_endpoints_api_v1_manage_fabrics_00420(): + """ + # Summary + + Verify EpManageFabricsDelete path without fabric_name raises ValueError + + ## Test + + - Accessing path without setting fabric_name raises ValueError + + ## Classes and Methods + + - EpManageFabricsDelete.path + """ + with pytest.raises(ValueError): + instance = EpManageFabricsDelete() + result = instance.path # noqa: F841 + + +def test_endpoints_api_v1_manage_fabrics_00430(): + """ + # Summary + + Verify EpManageFabricsDelete path with fabric_name and cluster_name query param + + ## Test + + - path includes clusterName query parameter when set + + ## Classes and Methods + + - EpManageFabricsDelete.path + - EpManageFabricsDelete.endpoint_params + """ + with does_not_raise(): + instance = EpManageFabricsDelete() + instance.fabric_name = "my-fabric" + instance.endpoint_params.cluster_name = "cluster1" + result = instance.path + assert result == "/api/v1/manage/fabrics/my-fabric?clusterName=cluster1" + + +# ============================================================================= +# Test: EpManageFabricsSummaryGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00500(): + """ + # 
Summary + + Verify EpManageFabricsSummaryGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsSummaryGet.__init__() + - EpManageFabricsSummaryGet.verb + - EpManageFabricsSummaryGet.class_name + """ + with does_not_raise(): + instance = EpManageFabricsSummaryGet() + assert instance.class_name == "EpApiV1ManageFabricsSummaryGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_00510(): + """ + # Summary + + Verify EpManageFabricsSummaryGet path with fabric_name + + ## Test + + - path returns "/api/v1/manage/fabrics/my-fabric/summary" when fabric_name is set + + ## Classes and Methods + + - EpManageFabricsSummaryGet.path + - EpManageFabricsSummaryGet.fabric_name + """ + with does_not_raise(): + instance = EpManageFabricsSummaryGet() + instance.fabric_name = "my-fabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/my-fabric/summary" + + +def test_endpoints_api_v1_manage_fabrics_00520(): + """ + # Summary + + Verify EpManageFabricsSummaryGet path without fabric_name raises ValueError + + ## Test + + - Accessing path without setting fabric_name raises ValueError + + ## Classes and Methods + + - EpManageFabricsSummaryGet.path + """ + with pytest.raises(ValueError): + instance = EpManageFabricsSummaryGet() + result = instance.path # noqa: F841 + + +def test_endpoints_api_v1_manage_fabrics_00530(): + """ + # Summary + + Verify EpManageFabricsSummaryGet path with fabric_name and cluster_name query param + + ## Test + + - path includes clusterName query parameter when set + + ## Classes and Methods + + - EpManageFabricsSummaryGet.path + - EpManageFabricsSummaryGet.endpoint_params + """ + with does_not_raise(): + instance = EpManageFabricsSummaryGet() + instance.fabric_name = "my-fabric" + instance.endpoint_params.cluster_name = "cluster1" + result = instance.path + assert result == 
"/api/v1/manage/fabrics/my-fabric/summary?clusterName=cluster1" + + +# ============================================================================= +# Test: All HTTP methods on same endpoint +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00600(): + """ + # Summary + + Verify all HTTP verbs produce correct paths and verbs for the same fabric_name + + ## Test + + - GET, POST, PUT, DELETE all return correct paths for same fabric_name + - Each endpoint returns the correct HTTP verb + + ## Classes and Methods + + - EpManageFabricsGet + - EpManageFabricsPost + - EpManageFabricsPut + - EpManageFabricsDelete + """ + fabric_name = "test-fabric" + + with does_not_raise(): + get_ep = EpManageFabricsGet() + get_ep.fabric_name = fabric_name + + post_ep = EpManageFabricsPost() + # POST is collection-level, but fabric_name can still be set + post_ep.fabric_name = fabric_name + + put_ep = EpManageFabricsPut() + put_ep.fabric_name = fabric_name + + delete_ep = EpManageFabricsDelete() + delete_ep.fabric_name = fabric_name + + expected_path = "/api/v1/manage/fabrics/test-fabric" + assert get_ep.path == expected_path + assert post_ep.path == expected_path + assert put_ep.path == expected_path + assert delete_ep.path == expected_path + + assert get_ep.verb == HttpVerbEnum.GET + assert post_ep.verb == HttpVerbEnum.POST + assert put_ep.verb == HttpVerbEnum.PUT + assert delete_ep.verb == HttpVerbEnum.DELETE + + +# ============================================================================= +# Test: Pydantic validation +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00610(): + """ + # Summary + + Verify Pydantic validation rejects empty string for fabric_name + + ## Test + + - Empty string is rejected for fabric_name (min_length=1) + + ## Classes and Methods + + - EpManageFabricsGet.__init__() + """ + with 
pytest.raises(ValueError): + instance = EpManageFabricsGet() + instance.fabric_name = "" + + +def test_endpoints_api_v1_manage_fabrics_00620(): + """ + # Summary + + Verify Pydantic validation rejects fabric_name exceeding max_length + + ## Test + + - fabric_name longer than 64 characters is rejected (max_length=64) + + ## Classes and Methods + + - EpManageFabricsGet.__init__() + """ + with pytest.raises(ValueError): + instance = EpManageFabricsGet() + instance.fabric_name = "a" * 65 From ebabd784c1597b9121df6b7e9f58210fd4bd7df5 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 14:12:42 +0530 Subject: [PATCH 098/109] Deploy + Config Save Changes --- .../v1/manage/manage_fabrics_switchactions.py | 75 +++++++++++++++++++ .../manage_switches/nd_switch_resources.py | 52 ++++++++++--- .../models/manage_switches/config_models.py | 14 +++- plugins/module_utils/utils.py | 38 ++++++++++ plugins/modules/nd_manage_switches.py | 40 +++++++--- .../nd_manage_switches/tasks/base_tasks.yaml | 5 +- .../nd_manage_switches/tests/deleted.yaml | 2 +- .../nd_manage_switches/tests/gathered.yaml | 2 +- .../nd_manage_switches/tests/merged.yaml | 12 +-- .../nd_manage_switches/tests/overridden.yaml | 8 +- .../nd_manage_switches/tests/poap.yaml | 13 ++-- .../nd_manage_switches/tests/replaced.yaml | 6 +- .../targets/nd_manage_switches/tests/rma.yaml | 7 +- .../nd_manage_switches/tests/sanity.yaml | 6 +- 14 files changed, 230 insertions(+), 50 deletions(-) diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py index d217a9ca..2d5aaa42 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py @@ -527,3 +527,78 @@ def path(self) -> str: def verb(self) -> HttpVerbEnum: """Return the HTTP verb for this endpoint.""" return HttpVerbEnum.POST + + +class 
EpManageFabricsSwitchActionsDeployPost(_EpManageFabricsSwitchActionsBase): + """ + # Summary + + Switch-Level Config Deploy Endpoint + + ## Description + + Endpoint to deploy pending configuration for specific switches in a fabric. + Unlike the global ``configDeploy`` endpoint, this deploys only the specified + switches identified by their serial numbers. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/deploy + - /api/v1/manage/fabrics/{fabricName}/switchActions/deploy?ticketId=CHG12345 + + ## Verb + + - POST + + ## Body + + ```json + {"switchIds": ["FOC21373AFA", "FVT93126SKE"]} + ``` + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + request = EpManageFabricsSwitchActionsDeployPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + # POST body: {"switchIds": ["FOC21373AFA"]} + ``` + """ + + class_name: Literal["EpManageFabricsSwitchActionsDeployPost"] = Field( + default="EpManageFabricsSwitchActionsDeployPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + base = f"{self._base_path}/deploy" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index ebcb3c4a..1b377176 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -125,6 +125,7 @@ class SwitchServiceContext: log: logging.Logger save_config: bool = True deploy_config: bool = True + deploy_type: str = "switch" # ========================================================================= @@ -1198,12 +1199,17 @@ def bulk_update_roles( log.debug("EXIT: bulk_update_roles()") - def finalize(self) -> None: + def finalize(self, serial_numbers: Optional[List[str]] = None) -> None: """Run optional save and deploy actions for the fabric. Uses service context flags to decide whether save and deploy should be executed. No-op in check mode. + Args: + serial_numbers: Switch serial numbers to deploy when + ``deploy_type`` is ``switch``. Falls back to + global deploy if empty or ``None``. + Returns: None. 
""" @@ -1215,8 +1221,17 @@ def finalize(self) -> None: self.fabric_utils.save_config() if self.ctx.deploy_config: - self.ctx.log.info("Deploying fabric configuration") - self.fabric_utils.deploy_config() + if self.ctx.deploy_type == "switch" and serial_numbers: + self.ctx.log.info("Switch-level deploy for: %s", serial_numbers) + self.fabric_utils.deploy_switches(serial_numbers) + else: + if self.ctx.deploy_type == "switch" and not serial_numbers: + self.ctx.log.warning( + "Switch-level deploy requested but no serial numbers provided " + "— falling back to global deploy" + ) + self.ctx.log.info("Deploying fabric configuration (global)") + self.fabric_utils.deploy_config() def post_add_processing( self, @@ -1272,7 +1287,7 @@ def post_add_processing( self.bulk_update_roles(switch_actions) try: - self.finalize() + self.finalize(serial_numbers=all_serials) except Exception as e: msg = f"Failed to finalize (config-save/deploy) for " f"{context} switches {all_serials}: {e}" log.error(msg) @@ -2139,7 +2154,7 @@ def handle( self.fabric_ops.bulk_save_credentials(switch_actions) try: - self.fabric_ops.finalize() + self.fabric_ops.finalize(serial_numbers=all_new_serials) except Exception as e: msg = f"Failed to finalize (config-save/deploy) for RMA " f"switches {all_new_serials}: {e}" log.error(msg) @@ -2400,13 +2415,15 @@ def __init__( self.state = self.module.params.get("state") # Shared context for service classes + config_actions = self.module.params.get("config_actions") or {} self.ctx = SwitchServiceContext( nd=nd, results=results, fabric=self.fabric, log=log, - save_config=self.module.params.get("save"), - deploy_config=self.module.params.get("deploy"), + save_config=config_actions.get("save", True), + deploy_config=config_actions.get("deploy", True), + deploy_type=config_actions.get("type", "switch"), ) # Switch collections @@ -2848,7 +2865,12 @@ def _handle_merged_state( ) elif idempotent_save_req: self.log.info("No adds/migrations but config-sync required — running 
finalize") - self.fabric_ops.finalize() + sync_serials = [ + existing_by_ip[cfg.seed_ip].switch_id + for cfg in plan.idempotent + if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + ] + self.fabric_ops.finalize(serial_numbers=sync_serials) # --- POAP / preprovision / swap / RMA ----------------------------------- # normal_readd was already processed via bulk_add above. @@ -3032,7 +3054,12 @@ def _handle_overridden_state( ) elif idempotent_save_req: self.log.info("No adds/migrations but config-sync required — running finalize") - self.fabric_ops.finalize() + sync_serials = [ + existing_by_ip[cfg.seed_ip].switch_id + for cfg in plan.idempotent + if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + ] + self.fabric_ops.finalize(serial_numbers=sync_serials) # --- Phase 4: POAP workflows (bootstrap / preprovision / swap) ---------- # plan.to_delete_existing was deleted in Phase 1. @@ -3207,7 +3234,12 @@ def _handle_replaced_state( ) elif idempotent_save_req: self.log.info("No adds/migrations but config-sync required — running finalize") - self.fabric_ops.finalize() + sync_serials = [ + existing_by_ip[cfg.seed_ip].switch_id + for cfg in plan.idempotent + if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + ] + self.fabric_ops.finalize(serial_numbers=sync_serials) # --- Phase 4: POAP workflows (bootstrap / preprovision / swap) ---------- poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index ed0a77e9..aa4e70cd 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -574,8 +574,18 @@ def get_argument_spec(cls) -> Dict[str, Any]: default="merged", choices=["merged", "replaced", "overridden", "deleted", "gathered"], ), - save=dict(type="bool", 
default=True), - deploy=dict(type="bool", default=True), + config_actions=dict( + type="dict", + options=dict( + save=dict(type="bool", default=True), + deploy=dict(type="bool", default=True), + type=dict( + type="str", + default="switch", + choices=["switch", "global"], + ), + ), + ), config=dict( type="list", elements="dict", diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 355c41a7..76a1e058 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -16,6 +16,9 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( EpManageFabricsActionsConfigSavePost, ) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( + EpManageFabricsSwitchActionsDeployPost, +) def sanitize_dict(dict_to_sanitize, keys=None, values=None, recursive=True, remove_none_values=True): @@ -129,6 +132,9 @@ def __init__( self.ep_config_deploy = EpManageFabricConfigDeployPost() self.ep_config_deploy.fabric_name = fabric + self.ep_switch_deploy = EpManageFabricsSwitchActionsDeployPost() + self.ep_switch_deploy.fabric_name = fabric + self.ep_fabric_get = EpManageFabricGet() self.ep_fabric_get.fabric_name = fabric @@ -201,6 +207,38 @@ def deploy_config(self) -> Dict[str, Any]: """ return self._request_endpoint(self.ep_config_deploy, action="Config deploy") + def deploy_switches(self, serial_numbers: List[str]) -> Dict[str, Any]: + """Deploy pending configuration for specific switches only. + + Uses the switch-level deploy endpoint which targets only the supplied + switches rather than all pending changes for the entire fabric. + + Args: + serial_numbers: Switch serial numbers (identifiers) to deploy. + + Returns: + API response dict. + + Raises: + SwitchOperationError: If the deploy request fails. 
+ """ + self.log.info( + "Switch-level deploy for %s switch(es) in fabric: %s", + len(serial_numbers), + self.fabric, + ) + try: + response = self.nd.request( + self.ep_switch_deploy.path, + verb=self.ep_switch_deploy.verb, + data={"switchIds": serial_numbers}, + ) + self.log.info("Switch-level deploy completed for fabric: %s", self.fabric) + return response + except Exception as e: + self.log.error("Switch-level deploy failed for fabric %s: %s", self.fabric, e) + raise SwitchOperationError(f"Switch-level deploy failed for fabric {self.fabric}: {e}") from e + def get_fabric_info(self) -> Dict[str, Any]: """Retrieve fabric information. diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 25ac4af7..9446e077 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -45,17 +45,32 @@ - overridden - deleted - gathered - save: + config_actions: description: - - Save/Recalculate the configuration of the fabric after inventory is updated. - type: bool - default: true - deploy: - description: - - Deploy the pending configuration of the fabric after inventory is updated. - - When set to C(true), C(save) must also be C(true). - type: bool - default: true + - Controls save and deploy behavior after inventory is updated. + type: dict + suboptions: + save: + description: + - Save/Recalculate the configuration of the fabric after inventory is updated. + type: bool + default: true + deploy: + description: + - Deploy the pending configuration after inventory is updated. + - When set to C(true), C(save) must also be C(true). + type: bool + default: true + type: + description: + - Scope of the deploy operation. + - C(switch) deploys only the switches affected in this run. + - C(global) deploys all pending changes for the entire fabric. + type: str + default: switch + choices: + - switch + - global config: description: - List of switch configurations. Optional for state C(deleted). 
@@ -408,8 +423,9 @@ def main(): require_pydantic(module) - if module.params.get("deploy") and not module.params.get("save"): - module.fail_json(msg="'deploy: true' requires 'save: true'") + config_actions = module.params.get("config_actions") or {} + if config_actions.get("deploy", True) and not config_actions.get("save", True): + module.fail_json(msg="'config_actions.deploy: true' requires 'config_actions.save: true'") # Initialize logging try: diff --git a/tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml b/tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml index da143944..c858d2b4 100644 --- a/tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml +++ b/tests/integration/targets/nd_manage_switches/tasks/base_tasks.yaml @@ -16,7 +16,10 @@ sw1: "{{ ansible_switch1 }}" sw2: "{{ ansible_switch2 }}" sw3: "{{ ansible_switch3 }}" - deploy: "{{ deploy }}" + config_actions: + save: "{{ save }}" + deploy: "{{ deploy }}" + type: "{{ config_actions_type }}" delegate_to: localhost # ---------------------------------------------- diff --git a/tests/integration/targets/nd_manage_switches/tests/deleted.yaml b/tests/integration/targets/nd_manage_switches/tests/deleted.yaml index a981a23c..b5f4b6bd 100644 --- a/tests/integration/targets/nd_manage_switches/tests/deleted.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/deleted.yaml @@ -13,7 +13,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_base_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result tags: deleted diff --git a/tests/integration/targets/nd_manage_switches/tests/gathered.yaml b/tests/integration/targets/nd_manage_switches/tests/gathered.yaml index 6fb378d9..f3468757 100644 --- a/tests/integration/targets/nd_manage_switches/tests/gathered.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/gathered.yaml @@ -13,7 +13,7 @@ fabric: "{{ test_data.test_fabric 
}}" state: merged config: "{{ nd_switches_base_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: create_result tags: query diff --git a/tests/integration/targets/nd_manage_switches/tests/merged.yaml b/tests/integration/targets/nd_manage_switches/tests/merged.yaml index b94180d4..a5b25f7d 100644 --- a/tests/integration/targets/nd_manage_switches/tests/merged.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/merged.yaml @@ -13,7 +13,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_base_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result tags: merged @@ -96,7 +96,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_merge_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result tags: merged @@ -154,7 +154,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_merge_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result tags: merged @@ -223,7 +223,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_merge_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" ignore_errors: true register: merged_result tags: merged @@ -256,7 +256,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_merge_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result ignore_errors: true tags: merged @@ -289,7 +289,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_merge_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result ignore_errors: true tags: merged diff --git 
a/tests/integration/targets/nd_manage_switches/tests/overridden.yaml b/tests/integration/targets/nd_manage_switches/tests/overridden.yaml index 0f4c2942..cee43173 100644 --- a/tests/integration/targets/nd_manage_switches/tests/overridden.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/overridden.yaml @@ -13,7 +13,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_base_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result tags: overridden @@ -66,7 +66,7 @@ fabric: "{{ test_data.test_fabric }}" state: overridden config: "{{ nd_switches_overridden_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: overridden_result tags: overridden @@ -106,7 +106,7 @@ fabric: "{{ test_data.test_fabric }}" state: overridden config: "{{ nd_switches_overridden_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: overridden_result tags: overridden @@ -145,7 +145,7 @@ fabric: "{{ test_data.test_fabric }}" state: overridden config: "{{ nd_switches_overridden_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: overridden_result tags: overridden diff --git a/tests/integration/targets/nd_manage_switches/tests/poap.yaml b/tests/integration/targets/nd_manage_switches/tests/poap.yaml index 4b569004..31dc8f0b 100644 --- a/tests/integration/targets/nd_manage_switches/tests/poap.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/poap.yaml @@ -22,7 +22,10 @@ poap_configmodel: "['ABC-D1230a']" poap_gateway: "192.168.2.1/24" sw3: "{{ ansible_switch3 }}" - deploy: "{{ deploy }}" + config_actions: + save: "{{ save }}" + deploy: "{{ deploy }}" + type: "{{ config_actions_type }}" poap_enabled: false delegate_to: localhost tags: poap @@ -145,7 +148,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ 
nd_switches_poap_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" when: poap_enabled == True register: merged_result tags: poap @@ -178,7 +181,7 @@ ansible.builtin.assert: that: - 'merged_result.changed == false' - # - 'merged_result.response == "The switch provided is already part of the fabric and cannot be created again"' + - 'merged_result.msg == "No switches to merge — fabric already matches desired config"' when: poap_enabled == True tags: poap @@ -212,7 +215,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_poap_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" when: poap_enabled == True register: merged_result tags: poap @@ -245,7 +248,7 @@ ansible.builtin.assert: that: - 'result.changed == false' - # - 'result.response == "The switch provided is already part of the fabric and cannot be created again"' + - 'result.msg == "No switches to merge — fabric already matches desired config"' when: poap_enabled == True tags: poap diff --git a/tests/integration/targets/nd_manage_switches/tests/replaced.yaml b/tests/integration/targets/nd_manage_switches/tests/replaced.yaml index 2f9b50ca..70415a19 100644 --- a/tests/integration/targets/nd_manage_switches/tests/replaced.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/replaced.yaml @@ -13,7 +13,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_base_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: merged_result tags: replaced @@ -66,7 +66,7 @@ fabric: "{{ test_data.test_fabric }}" state: replaced config: "{{ nd_switches_replaced_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: replaced_result tags: replaced @@ -113,7 +113,7 @@ fabric: "{{ test_data.test_fabric }}" state: replaced config: "{{ nd_switches_replaced_conf }}" - deploy: "{{ 
test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: replaced_result tags: replaced diff --git a/tests/integration/targets/nd_manage_switches/tests/rma.yaml b/tests/integration/targets/nd_manage_switches/tests/rma.yaml index a4daff9a..009231d4 100644 --- a/tests/integration/targets/nd_manage_switches/tests/rma.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/rma.yaml @@ -14,7 +14,10 @@ sw1: "{{ ansible_switch1 }}" sw1_serial: "1ABC23DEFGH" sw1_rma_serial: "1ABC23DERMA" - deploy: "{{ deploy }}" + config_actions: + save: "{{ save }}" + deploy: "{{ deploy }}" + type: "{{ config_actions_type }}" rma_enabled: false delegate_to: localhost tags: rma @@ -98,7 +101,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_rma_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" when: rma_enabled == True register: merged_result tags: rma diff --git a/tests/integration/targets/nd_manage_switches/tests/sanity.yaml b/tests/integration/targets/nd_manage_switches/tests/sanity.yaml index bad975f7..4d93bae0 100644 --- a/tests/integration/targets/nd_manage_switches/tests/sanity.yaml +++ b/tests/integration/targets/nd_manage_switches/tests/sanity.yaml @@ -18,7 +18,7 @@ fabric: "{{ test_data.test_fabric }}" state: merged config: "{{ nd_switches_base_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: create_result tags: sanity @@ -114,7 +114,7 @@ fabric: "{{ test_data.test_fabric }}" state: replaced config: "{{ nd_switches_sanity_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: replaced_result tags: sanity @@ -178,7 +178,7 @@ fabric: "{{ test_data.test_fabric }}" state: overridden config: "{{ nd_switches_sanity_conf }}" - deploy: "{{ test_data.deploy }}" + config_actions: "{{ test_data.config_actions }}" register: overriden_result tags: sanity From 
eb80a4a601d222f8dbb2a3958e54d29dc3066a3c Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 14:34:14 +0530 Subject: [PATCH 099/109] Merge_Prep --- .../v1/manage/{manage_fabrics.py => manage_fabrics_t.py} | 0 ...age_fabrics.py => test_endpoints_api_v1_manage_fabrics_t.py} | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename plugins/module_utils/endpoints/v1/manage/{manage_fabrics.py => manage_fabrics_t.py} (100%) rename tests/unit/module_utils/endpoints/{test_endpoints_api_v1_manage_fabrics.py => test_endpoints_api_v1_manage_fabrics_t.py} (98%) diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_t.py similarity index 100% rename from plugins/module_utils/endpoints/v1/manage/manage_fabrics.py rename to plugins/module_utils/endpoints/v1/manage/manage_fabrics_t.py diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_t.py similarity index 98% rename from tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py rename to tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_t.py index 60267297..6abe591b 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_t.py @@ -15,7 +15,7 @@ # pylint: enable=invalid-name import pytest -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( +from NDBranch.ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_t import ( EpManageFabricConfigDeployPost, EpManageFabricGet, FabricConfigDeployEndpointParams, From a35bd16b2cae3c0f9f6c5d3545cd484613cd83c9 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 16:50:42 +0530 Subject: [PATCH 100/109] Branch Merge --- .../endpoints/v1/manage/manage_fabrics.py | 135 ++++++++++-- 
.../endpoints/v1/manage/manage_fabrics_t.py | 202 ------------------ plugins/module_utils/utils.py | 4 +- .../test_endpoints_api_v1_manage_fabrics.py | 191 ++++++++++++++++- 4 files changed, 301 insertions(+), 231 deletions(-) delete mode 100644 plugins/module_utils/endpoints/v1/manage/manage_fabrics_t.py diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py index 8a9b1c2b..db16814d 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py @@ -11,18 +11,20 @@ ## Endpoints -- `EpApiV1ManageFabricsGet` - Get a specific fabric by name +- `EpManageFabricsGet` - Get a specific fabric by name (GET /api/v1/manage/fabrics/{fabric_name}) -- `EpApiV1ManageFabricsListGet` - List all fabrics with optional filtering +- `EpManageFabricsListGet` - List all fabrics with optional filtering (GET /api/v1/manage/fabrics) -- `EpApiV1ManageFabricsPost` - Create a new fabric +- `EpManageFabricsPost` - Create a new fabric (POST /api/v1/manage/fabrics) -- `EpApiV1ManageFabricsPut` - Update a specific fabric +- `EpManageFabricsPut` - Update a specific fabric (PUT /api/v1/manage/fabrics/{fabric_name}) -- `EpApiV1ManageFabricsDelete` - Delete a specific fabric +- `EpManageFabricsDelete` - Delete a specific fabric (DELETE /api/v1/manage/fabrics/{fabric_name}) -- `EpApiV1ManageFabricsSummaryGet` - Get summary for a specific fabric +- `EpManageFabricsSummaryGet` - Get summary for a specific fabric (GET /api/v1/manage/fabrics/{fabric_name}/summary) +- `EpManageFabricConfigDeployPost` - Deploy pending config for a fabric + (POST /api/v1/manage/fabrics/{fabric_name}/actions/configDeploy) """ from __future__ import annotations @@ -67,6 +69,30 @@ class FabricsEndpointParams(EndpointQueryParams): ) +class FabricConfigDeployEndpointParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for the fabric config deploy 
endpoint. + + ## Parameters + + - force_show_run: Force show running config before deploy (optional) + - incl_all_msd_switches: Include all MSD fabric switches (optional) + + ## Usage + + ```python + params = FabricConfigDeployEndpointParams(force_show_run=True) + query_string = params.to_query_string() + # Returns: "forceShowRun=true" + ``` + """ + + force_show_run: Optional[bool] = Field(default=None, description="Force show running config before deploy") + incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") + + class _EpManageFabricsBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for ND Manage Fabrics endpoints. @@ -149,14 +175,14 @@ class EpManageFabricsGet(_EpManageFabricsBase): ```python # Get details for a specific fabric - request = EpApiV1ManageFabricsGet() + request = EpManageFabricsGet() request.fabric_name = "my-fabric" path = request.path verb = request.verb # Path will be: /api/v1/manage/fabrics/my-fabric # Get fabric details targeting a specific cluster in a multi-cluster deployment - request = EpApiV1ManageFabricsGet() + request = EpManageFabricsGet() request.fabric_name = "my-fabric" request.endpoint_params.cluster_name = "cluster1" path = request.path @@ -165,7 +191,7 @@ class EpManageFabricsGet(_EpManageFabricsBase): ``` """ - class_name: Literal["EpApiV1ManageFabricsGet"] = Field(default="EpApiV1ManageFabricsGet", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsGet"] = Field(default="EpManageFabricsGet", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -261,13 +287,13 @@ class EpManageFabricsListGet(_EpManageFabricsBase): ```python # List all fabrics - ep = EpApiV1ManageFabricsListGet() + ep = EpManageFabricsListGet() path = ep.path verb = ep.verb # Path: /api/v1/manage/fabrics # List fabrics with 
filtering and pagination - ep = EpApiV1ManageFabricsListGet() + ep = EpManageFabricsListGet() ep.endpoint_params.category = "fabric" ep.endpoint_params.max = 10 path = ep.path @@ -277,7 +303,7 @@ class EpManageFabricsListGet(_EpManageFabricsBase): _require_fabric_name: ClassVar[bool] = False - class_name: Literal["EpApiV1ManageFabricsListGet"] = Field(default="EpApiV1ManageFabricsListGet", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsListGet"] = Field(default="EpManageFabricsListGet", description="Class name for backward compatibility") endpoint_params: FabricsListEndpointParams = Field(default_factory=FabricsListEndpointParams, description="Endpoint-specific query parameters") @@ -325,7 +351,7 @@ class EpManageFabricsPost(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsPost() + ep = EpManageFabricsPost() rest_send.path = ep.path rest_send.verb = ep.verb rest_send.payload = { @@ -339,7 +365,7 @@ class EpManageFabricsPost(_EpManageFabricsBase): _require_fabric_name: ClassVar[bool] = False - class_name: Literal["EpApiV1ManageFabricsPost"] = Field(default="EpApiV1ManageFabricsPost", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsPost"] = Field(default="EpManageFabricsPost", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -381,7 +407,7 @@ class EpManageFabricsPut(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsPut() + ep = EpManageFabricsPut() ep.fabric_name = "my-fabric" rest_send.path = ep.path rest_send.verb = ep.verb @@ -393,7 +419,7 @@ class EpManageFabricsPut(_EpManageFabricsBase): ``` """ - class_name: Literal["EpApiV1ManageFabricsPut"] = Field(default="EpApiV1ManageFabricsPut", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsPut"] = 
Field(default="EpManageFabricsPut", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -430,14 +456,14 @@ class EpManageFabricsDelete(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsDelete() + ep = EpManageFabricsDelete() ep.fabric_name = "my-fabric" rest_send.path = ep.path rest_send.verb = ep.verb ``` """ - class_name: Literal["EpApiV1ManageFabricsDelete"] = Field(default="EpApiV1ManageFabricsDelete", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsDelete"] = Field(default="EpManageFabricsDelete", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -474,7 +500,7 @@ class EpManageFabricsSummaryGet(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsSummaryGet() + ep = EpManageFabricsSummaryGet() ep.fabric_name = "my-fabric" path = ep.path verb = ep.verb @@ -482,8 +508,8 @@ class EpManageFabricsSummaryGet(_EpManageFabricsBase): ``` """ - class_name: Literal["EpApiV1ManageFabricsSummaryGet"] = Field( - default="EpApiV1ManageFabricsSummaryGet", description="Class name for backward compatibility" + class_name: Literal["EpManageFabricsSummaryGet"] = Field( + default="EpManageFabricsSummaryGet", description="Class name for backward compatibility" ) _path_suffix: ClassVar[Optional[str]] = "summary" @@ -494,3 +520,70 @@ class EpManageFabricsSummaryGet(_EpManageFabricsBase): def verb(self) -> HttpVerbEnum: """Return the HTTP verb for this endpoint.""" return HttpVerbEnum.GET + + +class EpManageFabricConfigDeployPost(_EpManageFabricsBase): + """ + # Summary + + Fabric Config Deploy Endpoint + + ## Description + + Endpoint to deploy pending configuration to all switches in a fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabric_name}/actions/configDeploy + - /api/v1/manage/fabrics/{fabric_name}/actions/configDeploy?forceShowRun=true + + ## Verb + + - POST + + ## Query Parameters + + - force_show_run: Force show running config before deploy (optional) + - incl_all_msd_switches: Include all MSD fabric switches (optional) + + ## Usage + + ```python + ep = EpManageFabricConfigDeployPost() + ep.fabric_name = "MyFabric" + path = ep.path + verb = ep.verb + + # With forceShowRun + ep.endpoint_params.force_show_run = True + path = ep.path + # Path: /api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true + ``` + """ + + class_name: Literal["EpManageFabricConfigDeployPost"] = Field( + default="EpManageFabricConfigDeployPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: FabricConfigDeployEndpointParams = Field( + default_factory=FabricConfigDeployEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """Build the endpoint path with optional query string.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base = BasePath.path("fabrics", self.fabric_name, "actions", "configDeploy") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_t.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_t.py deleted file mode 100644 index c54716ac..00000000 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_t.py +++ /dev/null @@ -1,202 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -""" 
-ND Manage Fabrics endpoint models. - -This module contains endpoint definitions for fabric-level operations -in the ND Manage API. - -Endpoints covered: -- Config deploy -- Get fabric info -""" - -from __future__ import absolute_import, annotations, division, print_function - -# pylint: disable=invalid-name -__metaclass__ = type -__author__ = "Akshayanat C S" -# pylint: enable=invalid-name - -from typing import Literal, Optional - -from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( - FabricNameMixin, -) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( - EndpointQueryParams, -) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( - BasePath, -) -from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( - Field, -) -from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( - NDEndpointBaseModel, -) - - -class FabricConfigDeployEndpointParams(EndpointQueryParams): - """ - # Summary - - Endpoint-specific query parameters for fabric config deploy endpoint. - - ## Parameters - - - force_show_run: Force show running config before deploy (optional) - - incl_all_msd_switches: Include all MSD fabric switches (optional) - - ## Usage - - ```python - params = FabricConfigDeployEndpointParams(force_show_run=True) - query_string = params.to_query_string() - # Returns: "forceShowRun=true" - ``` - """ - - force_show_run: Optional[bool] = Field(default=None, description="Force show running config before deploy") - incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") - - -class _EpManageFabricsBase(FabricNameMixin, NDEndpointBaseModel): - """ - Base class for Fabrics endpoints. 
- - Provides common functionality for all HTTP methods on the - /api/v1/manage/fabrics/{fabricName} endpoint family. - """ - - @property - def _base_path(self) -> str: - """Build the base endpoint path.""" - if self.fabric_name is None: - raise ValueError("fabric_name must be set before accessing path") - return BasePath.path("fabrics", self.fabric_name) - - -class EpManageFabricConfigDeployPost(_EpManageFabricsBase): - """ - # Summary - - Fabric Config Deploy Endpoint - - ## Description - - Endpoint to deploy pending configuration to switches in a fabric. - - ## Path - - - /api/v1/manage/fabrics/{fabricName}/actions/configDeploy - - /api/v1/manage/fabrics/{fabricName}/actions/configDeploy?forceShowRun=true - - ## Verb - - - POST - - ## Query Parameters - - - force_show_run: Force show running config before deploy (optional) - - incl_all_msd_switches: Include all MSD fabric switches (optional) - - ## Usage - - ```python - # Deploy with defaults - request = EpManageFabricConfigDeployPost() - request.fabric_name = "MyFabric" - path = request.path - verb = request.verb - - # Deploy forcing show run - request = EpManageFabricConfigDeployPost() - request.fabric_name = "MyFabric" - request.endpoint_params.force_show_run = True - path = request.path - verb = request.verb - # Path will be: /api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true - ``` - """ - - class_name: Literal["EpManageFabricConfigDeployPost"] = Field( - default="EpManageFabricConfigDeployPost", - frozen=True, - description="Class name for backward compatibility", - ) - endpoint_params: FabricConfigDeployEndpointParams = Field( - default_factory=FabricConfigDeployEndpointParams, - description="Endpoint-specific query parameters", - ) - - @property - def path(self) -> str: - """ - # Summary - - Build the endpoint path with optional query string. 
- - ## Returns - - - Complete endpoint path string, optionally including query parameters - """ - base = f"{self._base_path}/actions/configDeploy" - query_string = self.endpoint_params.to_query_string() - if query_string: - return f"{base}?{query_string}" - return base - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.POST - - -class EpManageFabricGet(_EpManageFabricsBase): - """ - # Summary - - Get Fabric Info Endpoint - - ## Description - - Endpoint to retrieve fabric information. - - ## Path - - - /api/v1/manage/fabrics/{fabricName} - - ## Verb - - - GET - - ## Usage - - ```python - request = EpManageFabricGet() - request.fabric_name = "MyFabric" - path = request.path - verb = request.verb - ``` - """ - - class_name: Literal["EpManageFabricGet"] = Field( - default="EpManageFabricGet", - frozen=True, - description="Class name for backward compatibility", - ) - - @property - def path(self) -> str: - """Build the endpoint path.""" - return self._base_path - - @property - def verb(self) -> HttpVerbEnum: - """Return the HTTP verb for this endpoint.""" - return HttpVerbEnum.GET diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index ea629dce..2bf08d4e 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -11,7 +11,7 @@ from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( EpManageFabricConfigDeployPost, - EpManageFabricGet, + EpManageFabricsGet, ) from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( EpManageFabricsActionsConfigSavePost, @@ -146,7 +146,7 @@ def __init__( self.ep_switch_deploy = EpManageFabricsSwitchActionsDeployPost() self.ep_switch_deploy.fabric_name = fabric - self.ep_fabric_get = EpManageFabricGet() + self.ep_fabric_get = EpManageFabricsGet() self.ep_fabric_get.fabric_name = fabric # 
----------------------------------------------------------------- diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py index cb1b17d4..5e4b674a 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py @@ -16,12 +16,14 @@ import pytest # pylint: disable=unused-import from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricConfigDeployPost, EpManageFabricsDelete, EpManageFabricsGet, EpManageFabricsListGet, EpManageFabricsPost, EpManageFabricsPut, EpManageFabricsSummaryGet, + FabricConfigDeployEndpointParams, ) from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( @@ -53,7 +55,7 @@ def test_endpoints_api_v1_manage_fabrics_00010(): """ with does_not_raise(): instance = EpManageFabricsGet() - assert instance.class_name == "EpApiV1ManageFabricsGet" + assert instance.class_name == "EpManageFabricsGet" assert instance.verb == HttpVerbEnum.GET @@ -146,7 +148,7 @@ def test_endpoints_api_v1_manage_fabrics_00100(): """ with does_not_raise(): instance = EpManageFabricsListGet() - assert instance.class_name == "EpApiV1ManageFabricsListGet" + assert instance.class_name == "EpManageFabricsListGet" assert instance.verb == HttpVerbEnum.GET @@ -296,7 +298,7 @@ def test_endpoints_api_v1_manage_fabrics_00200(): """ with does_not_raise(): instance = EpManageFabricsPost() - assert instance.class_name == "EpApiV1ManageFabricsPost" + assert instance.class_name == "EpManageFabricsPost" assert instance.verb == HttpVerbEnum.POST @@ -388,7 +390,7 @@ def test_endpoints_api_v1_manage_fabrics_00300(): """ with does_not_raise(): instance = EpManageFabricsPut() - assert instance.class_name == "EpApiV1ManageFabricsPut" + 
assert instance.class_name == "EpManageFabricsPut" assert instance.verb == HttpVerbEnum.PUT @@ -481,7 +483,7 @@ def test_endpoints_api_v1_manage_fabrics_00400(): """ with does_not_raise(): instance = EpManageFabricsDelete() - assert instance.class_name == "EpApiV1ManageFabricsDelete" + assert instance.class_name == "EpManageFabricsDelete" assert instance.verb == HttpVerbEnum.DELETE @@ -574,7 +576,7 @@ def test_endpoints_api_v1_manage_fabrics_00500(): """ with does_not_raise(): instance = EpManageFabricsSummaryGet() - assert instance.class_name == "EpApiV1ManageFabricsSummaryGet" + assert instance.class_name == "EpManageFabricsSummaryGet" assert instance.verb == HttpVerbEnum.GET @@ -734,3 +736,180 @@ def test_endpoints_api_v1_manage_fabrics_00620(): with pytest.raises(ValueError): instance = EpManageFabricsGet() instance.fabric_name = "a" * 65 + + +# ============================================================================= +# Test: FabricConfigDeployEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00700(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams default values + + ## Test + + - force_show_run defaults to None + - incl_all_msd_switches defaults to None + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams() + assert params.force_show_run is None + assert params.incl_all_msd_switches is None + + +def test_endpoints_api_v1_manage_fabrics_00710(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams force_show_run can be set + + ## Test + + - force_show_run can be set to True + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams(force_show_run=True) + assert params.force_show_run is True + + +def test_endpoints_api_v1_manage_fabrics_00720(): + """ + # 
Summary + + Verify FabricConfigDeployEndpointParams generates query string with both params + + ## Test + + - to_query_string() includes forceShowRun and inclAllMsdSwitches when both are set + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams(force_show_run=True, incl_all_msd_switches=True) + result = params.to_query_string() + assert "forceShowRun=true" in result + assert "inclAllMsdSwitches=true" in result + + +def test_endpoints_api_v1_manage_fabrics_00730(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams returns empty query string when no params set + + ## Test + + - to_query_string() returns empty string when no params set + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: EpManageFabricConfigDeployPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00800(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.__init__() + - EpManageFabricConfigDeployPost.class_name + - EpManageFabricConfigDeployPost.verb + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + assert instance.class_name == "EpManageFabricConfigDeployPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_00810(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## 
Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + instance = EpManageFabricConfigDeployPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_00820(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy" + + +def test_endpoints_api_v1_manage_fabrics_00830(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost path with force_show_run + + ## Test + + - path includes forceShowRun in query string when set to True + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.force_show_run = True + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" From 35eca1984c834e0355a4c2bc3a4e98a90114091f Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 18:18:06 +0530 Subject: [PATCH 101/109] Check Mode Changes --- .../manage_switches/nd_switch_resources.py | 164 ++++++++++++++++-- plugins/module_utils/rest/rest_send.py | 6 + 2 files changed, 160 insertions(+), 10 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 1b377176..1f029118 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2437,6 +2437,8 @@ def __init__( self.sent: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel) self.sent_adds: List[SwitchConfigModel] = [] 
self.proposed_cfgs: List[SwitchConfigModel] = [] + # Plan stored here after compute_changes so check-mode output can use it + self._plan: Optional[SwitchPlan] = None except Exception as e: msg = f"Failed to query fabric '{self.fabric}' inventory " f"during initialization: {e}" log.error(msg) @@ -2498,6 +2500,131 @@ def _proposed_to_config_list(self, configs: List["SwitchConfigModel"]) -> List[D self.log.warning("Could not convert config %s for output: %s", cfg.seed_ip, exc) return result + def _build_check_mode_output(self) -> Dict[str, Any]: + """Build before/after/diff/changed output for check mode. + + Since no API writes are issued in check mode, ``self.sent`` and + ``self.sent_adds`` are always empty. This method derives the same + information directly from the action plan (``self._plan``) and the + real pre-operation inventory snapshot (``self.before``). + + For ``deleted`` state the plan may be ``None`` (no config supplied), + so the entire existing inventory is treated as the deletion target. + + Returns: + Dict suitable for merging into the final ``exit_json`` payload, + containing ``before``, ``after``, ``diff``, and ``changed``. + """ + before_list = self._inventory_to_config_list(self.before) + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.before} + diff_list: List[Dict[str, Any]] = [] + + if self._plan is not None: + plan = self._plan + + # Switches that would be deleted + deleted_sws: List[SwitchDataModel] = list(plan.to_delete) + list(plan.to_delete_existing) + if self.state == "deleted": + # _handle_deleted_state fills plan.to_delete only for + # overridden; for state=deleted the deletions come from the + # handler's own switch-by-switch loop which we replicate here. 
+ deleted_sws = [ + sw for sw in self.before + if sw.fabric_management_ip in {cfg.seed_ip for cfg in (self.proposed_cfgs or [])} + or not self.proposed_cfgs + ] + for sw in deleted_sws: + if not sw.fabric_management_ip: + continue + role = sw.switch_role + diff_list.append({ + "seed_ip": sw.fabric_management_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "_action": "deleted", + }) + + # Switches that would be added (normal to_add + POAP/preprov/rma) + adds: List[SwitchConfigModel] = ( + list(plan.to_add) + + list(plan.normal_readd) + + list(plan.to_bootstrap) + + list(plan.to_preprovision) + + list(plan.to_swap) + + list(plan.to_rma) + ) + for cfg in adds: + try: + entry = cfg.to_config() + entry.pop("platform_type", None) + entry.pop("operation_type", None) + entry["password"] = "" + entry["_action"] = "added" + diff_list.append(entry) + except Exception as exc: + self.log.warning("check_mode diff: could not convert %s: %s", cfg.seed_ip, exc) + + # Switches whose role would be updated (overridden/replaced) + for cfg in plan.to_update: + try: + entry = cfg.to_config() + entry.pop("platform_type", None) + entry.pop("operation_type", None) + entry["password"] = "" + entry["_action"] = "updated" + diff_list.append(entry) + except Exception as exc: + self.log.warning("check_mode diff: could not convert %s: %s", cfg.seed_ip, exc) + + # Simulate the post-operation inventory for "after": + # start from before, remove deletions, add additions as stubs + deleted_ips = {sw.fabric_management_ip for sw in deleted_sws} + after_list = [e for e in before_list if e.get("seed_ip") not in deleted_ips] + for cfg in adds: + # Mirror the format produced by _inventory_to_config_list — no + # poap/preprovision sub-blocks since those reflect the user's + # desired discovery method, not the resulting inventory state. 
+ role = cfg.role + after_list.append({ + "seed_ip": cfg.seed_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "auth_proto": "MD5", + "preserve_config": bool(getattr(cfg, "preserve_config", False)), + "username": "", + "password": "", + }) + # Apply role updates in-place + update_role_map = {cfg.seed_ip: cfg for cfg in plan.to_update} + for entry in after_list: + ip = entry.get("seed_ip") + if ip in update_role_map: + role = update_role_map[ip].role + entry["role"] = getattr(role, "value", str(role)) if role else entry.get("role") + else: + # deleted state with no config — would delete everything + after_list = [] + for sw in self.before: + if not sw.fabric_management_ip: + continue + role = sw.switch_role + diff_list.append({ + "seed_ip": sw.fabric_management_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "_action": "deleted", + }) + + changed = bool(diff_list) + output_level = self.module.params.get("output_level", "normal") + result: Dict[str, Any] = { + "output_level": output_level, + "changed": changed, + "before": before_list, + "after": after_list, + "diff": diff_list, + } + if output_level in ("info", "debug"): + result["proposed"] = self._proposed_to_config_list(self.proposed_cfgs) + return result + def exit_json(self) -> None: """Finalize collected results and exit the Ansible module. @@ -2523,10 +2650,12 @@ def exit_json(self) -> None: self.nd.module.fail_json(msg=msg) self.output.assign(after=self.existing) final.update(self.output.format(gathered=gathered)) + elif self.nd.module.check_mode: + final.update(self._build_check_mode_output()) else: # Re-query the fabric to get the actual post-operation inventory so # that "after" reflects real state rather than the pre-op snapshot. 
- if True not in self.results.failed and not self.nd.module.check_mode: + if True not in self.results.failed: self.existing = NDConfigCollection.from_api_response( response_data=self._query_all_switches(), model_class=SwitchDataModel, @@ -2635,6 +2764,7 @@ def manage_state(self) -> None: # Classify all configs in one pass — idempotency included plan = SwitchDiffEngine.compute_changes(proposed_config, list(self.existing), self.log) + self._plan = plan # --- Single combined discovery pass ------------------------------------- # Discover every switch that is not yet in the fabric: @@ -2642,23 +2772,37 @@ def manage_state(self) -> None: # • plan.normal_readd — POAP/preprov mismatches that are reachable # Switches already in the fabric (to_update, migration_mode) are # skipped here; overridden will re-discover them after deletion. + # + # In check mode, discovery is skipped entirely: new switches are not + # yet reachable/enrolled so shallow discovery would fail or return no + # data. The per-state check-mode guards handle reporting via the diff. 
configs_to_discover = plan.to_add + plan.normal_readd if configs_to_discover: - self.log.info( - "Discovering %s switch(es): %s normal-add, %s poap-readd", - len(configs_to_discover), - len(plan.to_add), - len(plan.normal_readd), - ) - discovered_data = self.discovery.discover(configs_to_discover) + if self.nd.module.check_mode: + self.log.info( + "Check mode: skipping discovery for %s switch(es) (%s normal-add, %s poap-readd) — assuming to_add", + len(configs_to_discover), + len(plan.to_add), + len(plan.normal_readd), + ) + discovered_data = {} + else: + self.log.info( + "Discovering %s switch(es): %s normal-add, %s poap-readd", + len(configs_to_discover), + len(plan.to_add), + len(plan.normal_readd), + ) + discovered_data = self.discovery.discover(configs_to_discover) else: self.log.info("No switches need discovery in this run") discovered_data = {} # Build proposed SwitchDataModel collection for normal switches only - # (needed for the self.proposed reference used in check-mode reporting) + # (needed for the self.proposed reference used in check-mode reporting). + # Skipped in check mode since discovered_data is empty for new switches. normal_configs = [c for c in proposed_config if c.operation_type == "normal"] - if normal_configs: + if normal_configs and not self.nd.module.check_mode: built = self.discovery.build_proposed(normal_configs, discovered_data, list(self.existing)) self.proposed = NDConfigCollection(model_class=SwitchDataModel, items=built) diff --git a/plugins/module_utils/rest/rest_send.py b/plugins/module_utils/rest/rest_send.py index 7631b0dd..850bfd55 100644 --- a/plugins/module_utils/rest/rest_send.py +++ b/plugins/module_utils/rest/rest_send.py @@ -275,6 +275,12 @@ def _commit_check_mode(self) -> None: msg += f"verb {self.verb}, path {self.path}." self.log.debug(msg) + # GET is read-only: execute against the real API so check-mode diffs + # reflect actual controller state rather than a fake empty response. 
+ if self.verb == HttpVerbEnum.GET: + self._commit_normal_mode() + return + response_current: dict = {} response_current["RETURN_CODE"] = 200 response_current["METHOD"] = self.verb From 65fc1b29544d2b9ef90c47d8f2b39b3299e761f7 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 18:25:54 +0530 Subject: [PATCH 102/109] Black and Sanity Fix --- .../endpoints/v1/manage/manage_fabrics.py | 5 +- .../manage_switches/nd_switch_resources.py | 70 ++++++++----------- 2 files changed, 31 insertions(+), 44 deletions(-) diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py index db16814d..8ae8803a 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py @@ -508,9 +508,7 @@ class EpManageFabricsSummaryGet(_EpManageFabricsBase): ``` """ - class_name: Literal["EpManageFabricsSummaryGet"] = Field( - default="EpManageFabricsSummaryGet", description="Class name for backward compatibility" - ) + class_name: Literal["EpManageFabricsSummaryGet"] = Field(default="EpManageFabricsSummaryGet", description="Class name for backward compatibility") _path_suffix: ClassVar[Optional[str]] = "summary" @@ -586,4 +584,3 @@ def path(self) -> str: def verb(self) -> HttpVerbEnum: """Return the HTTP verb for this endpoint.""" return HttpVerbEnum.POST - diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 1f029118..c41c41cb 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -1226,10 +1226,7 @@ def finalize(self, serial_numbers: Optional[List[str]] = None) -> None: self.fabric_utils.deploy_switches(serial_numbers) else: if self.ctx.deploy_type == "switch" and not serial_numbers: - self.ctx.log.warning( - "Switch-level deploy requested but no serial numbers provided " 
- "— falling back to global deploy" - ) + self.ctx.log.warning("Switch-level deploy requested but no serial numbers provided — falling back to global deploy") self.ctx.log.info("Deploying fabric configuration (global)") self.fabric_utils.deploy_config() @@ -2529,28 +2526,23 @@ def _build_check_mode_output(self) -> Dict[str, Any]: # overridden; for state=deleted the deletions come from the # handler's own switch-by-switch loop which we replicate here. deleted_sws = [ - sw for sw in self.before - if sw.fabric_management_ip in {cfg.seed_ip for cfg in (self.proposed_cfgs or [])} - or not self.proposed_cfgs + sw for sw in self.before if sw.fabric_management_ip in {cfg.seed_ip for cfg in (self.proposed_cfgs or [])} or not self.proposed_cfgs ] for sw in deleted_sws: if not sw.fabric_management_ip: continue role = sw.switch_role - diff_list.append({ - "seed_ip": sw.fabric_management_ip, - "role": getattr(role, "value", str(role)) if role else "leaf", - "_action": "deleted", - }) + diff_list.append( + { + "seed_ip": sw.fabric_management_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "_action": "deleted", + } + ) # Switches that would be added (normal to_add + POAP/preprov/rma) adds: List[SwitchConfigModel] = ( - list(plan.to_add) - + list(plan.normal_readd) - + list(plan.to_bootstrap) - + list(plan.to_preprovision) - + list(plan.to_swap) - + list(plan.to_rma) + list(plan.to_add) + list(plan.normal_readd) + list(plan.to_bootstrap) + list(plan.to_preprovision) + list(plan.to_swap) + list(plan.to_rma) ) for cfg in adds: try: @@ -2584,14 +2576,16 @@ def _build_check_mode_output(self) -> Dict[str, Any]: # poap/preprovision sub-blocks since those reflect the user's # desired discovery method, not the resulting inventory state. 
role = cfg.role - after_list.append({ - "seed_ip": cfg.seed_ip, - "role": getattr(role, "value", str(role)) if role else "leaf", - "auth_proto": "MD5", - "preserve_config": bool(getattr(cfg, "preserve_config", False)), - "username": "", - "password": "", - }) + after_list.append( + { + "seed_ip": cfg.seed_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "auth_proto": "MD5", + "preserve_config": bool(getattr(cfg, "preserve_config", False)), + "username": "", + "password": "", + } + ) # Apply role updates in-place update_role_map = {cfg.seed_ip: cfg for cfg in plan.to_update} for entry in after_list: @@ -2606,11 +2600,13 @@ def _build_check_mode_output(self) -> Dict[str, Any]: if not sw.fabric_management_ip: continue role = sw.switch_role - diff_list.append({ - "seed_ip": sw.fabric_management_ip, - "role": getattr(role, "value", str(role)) if role else "leaf", - "_action": "deleted", - }) + diff_list.append( + { + "seed_ip": sw.fabric_management_ip, + "role": getattr(role, "value", str(role)) if role else "leaf", + "_action": "deleted", + } + ) changed = bool(diff_list) output_level = self.module.params.get("output_level", "normal") @@ -3010,9 +3006,7 @@ def _handle_merged_state( elif idempotent_save_req: self.log.info("No adds/migrations but config-sync required — running finalize") sync_serials = [ - existing_by_ip[cfg.seed_ip].switch_id - for cfg in plan.idempotent - if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + existing_by_ip[cfg.seed_ip].switch_id for cfg in plan.idempotent if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id ] self.fabric_ops.finalize(serial_numbers=sync_serials) @@ -3199,9 +3193,7 @@ def _handle_overridden_state( elif idempotent_save_req: self.log.info("No adds/migrations but config-sync required — running finalize") sync_serials = [ - existing_by_ip[cfg.seed_ip].switch_id - for cfg in plan.idempotent - if cfg.seed_ip in existing_by_ip and 
existing_by_ip[cfg.seed_ip].switch_id + existing_by_ip[cfg.seed_ip].switch_id for cfg in plan.idempotent if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id ] self.fabric_ops.finalize(serial_numbers=sync_serials) @@ -3379,9 +3371,7 @@ def _handle_replaced_state( elif idempotent_save_req: self.log.info("No adds/migrations but config-sync required — running finalize") sync_serials = [ - existing_by_ip[cfg.seed_ip].switch_id - for cfg in plan.idempotent - if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + existing_by_ip[cfg.seed_ip].switch_id for cfg in plan.idempotent if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id ] self.fabric_ops.finalize(serial_numbers=sync_serials) From a88cea41604181fbebbeddf0e506c78dabf84fa5 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 19:07:19 +0530 Subject: [PATCH 103/109] Revert Changes to Rest about Check Mode, Make the Fix in Switch Resources. --- .../manage_switches/nd_switch_resources.py | 10 + plugins/module_utils/rest/rest_send.py | 6 - .../test_endpoints_api_v1_manage_fabrics_t.py | 271 ------------------ 3 files changed, 10 insertions(+), 277 deletions(-) delete mode 100644 tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_t.py diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index c41c41cb..c727610c 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -3509,12 +3509,22 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: self.log.debug("Querying all switches with endpoint: %s", endpoint.path) self.log.debug("Query verb: %s", endpoint.verb) + # GETs must run against the real API even in check_mode so that the + # before/after diff reflects actual controller state. 
+ rest_send = self.nd.rest_send + in_check_mode = rest_send.check_mode + if in_check_mode: + rest_send.save_settings() + rest_send.check_mode = False try: result = self.nd.request(path=endpoint.path, verb=endpoint.verb) except Exception as e: msg = f"Failed to query switches from " f"fabric '{self.fabric}': {e}" self.log.error(msg) self.nd.module.fail_json(msg=msg) + finally: + if in_check_mode: + rest_send.restore_settings() if isinstance(result, list): switches = result diff --git a/plugins/module_utils/rest/rest_send.py b/plugins/module_utils/rest/rest_send.py index 850bfd55..7631b0dd 100644 --- a/plugins/module_utils/rest/rest_send.py +++ b/plugins/module_utils/rest/rest_send.py @@ -275,12 +275,6 @@ def _commit_check_mode(self) -> None: msg += f"verb {self.verb}, path {self.path}." self.log.debug(msg) - # GET is read-only: execute against the real API so check-mode diffs - # reflect actual controller state rather than a fake empty response. - if self.verb == HttpVerbEnum.GET: - self._commit_normal_mode() - return - response_current: dict = {} response_current["RETURN_CODE"] = 200 response_current["METHOD"] = self.verb diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_t.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_t.py deleted file mode 100644 index 6abe591b..00000000 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_t.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright: (c) 2026, Akshayanat C S (@achengam) - -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -""" -Unit tests for manage_fabrics.py - -Tests the ND Manage Fabrics endpoint classes. 
-""" - -from __future__ import absolute_import, annotations, division, print_function - -# pylint: disable=invalid-name -__metaclass__ = type -# pylint: enable=invalid-name - -import pytest -from NDBranch.ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_t import ( - EpManageFabricConfigDeployPost, - EpManageFabricGet, - FabricConfigDeployEndpointParams, -) -from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum -from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( - does_not_raise, -) - -# ============================================================================= -# Test: FabricConfigDeployEndpointParams -# ============================================================================= - - -def test_endpoints_api_v1_manage_fabrics_00010(): - """ - # Summary - - Verify FabricConfigDeployEndpointParams default values - - ## Test - - - force_show_run defaults to None - - incl_all_msd_switches defaults to None - - ## Classes and Methods - - - FabricConfigDeployEndpointParams.__init__() - """ - with does_not_raise(): - params = FabricConfigDeployEndpointParams() - assert params.force_show_run is None - assert params.incl_all_msd_switches is None - - -def test_endpoints_api_v1_manage_fabrics_00020(): - """ - # Summary - - Verify FabricConfigDeployEndpointParams force_show_run can be set - - ## Test - - - force_show_run can be set to True - - ## Classes and Methods - - - FabricConfigDeployEndpointParams.__init__() - """ - with does_not_raise(): - params = FabricConfigDeployEndpointParams(force_show_run=True) - assert params.force_show_run is True - - -def test_endpoints_api_v1_manage_fabrics_00030(): - """ - # Summary - - Verify FabricConfigDeployEndpointParams generates query string with both params - - ## Test - - - to_query_string() includes forceShowRun and inclAllMsdSwitches when both are set - - ## Classes and Methods - - - FabricConfigDeployEndpointParams.to_query_string() - 
""" - with does_not_raise(): - params = FabricConfigDeployEndpointParams(force_show_run=True, incl_all_msd_switches=True) - result = params.to_query_string() - assert "forceShowRun=true" in result - assert "inclAllMsdSwitches=true" in result - - -def test_endpoints_api_v1_manage_fabrics_00040(): - """ - # Summary - - Verify FabricConfigDeployEndpointParams returns empty query string when no params set - - ## Test - - - to_query_string() returns empty string when no params set - - ## Classes and Methods - - - FabricConfigDeployEndpointParams.to_query_string() - """ - with does_not_raise(): - params = FabricConfigDeployEndpointParams() - result = params.to_query_string() - assert result == "" - - -# ============================================================================= -# Test: EpManageFabricConfigDeployPost -# ============================================================================= - - -def test_endpoints_api_v1_manage_fabrics_00100(): - """ - # Summary - - Verify EpManageFabricConfigDeployPost basic instantiation - - ## Test - - - Instance can be created - - class_name is set correctly - - verb is POST - - ## Classes and Methods - - - EpManageFabricConfigDeployPost.__init__() - - EpManageFabricConfigDeployPost.class_name - - EpManageFabricConfigDeployPost.verb - """ - with does_not_raise(): - instance = EpManageFabricConfigDeployPost() - assert instance.class_name == "EpManageFabricConfigDeployPost" - assert instance.verb == HttpVerbEnum.POST - - -def test_endpoints_api_v1_manage_fabrics_00110(): - """ - # Summary - - Verify EpManageFabricConfigDeployPost raises ValueError when fabric_name is not set - - ## Test - - - Accessing path raises ValueError when fabric_name is None - - ## Classes and Methods - - - EpManageFabricConfigDeployPost.path - """ - instance = EpManageFabricConfigDeployPost() - with pytest.raises(ValueError): - instance.path - - -def test_endpoints_api_v1_manage_fabrics_00120(): - """ - # Summary - - Verify 
EpManageFabricConfigDeployPost path without query params - - ## Test - - - path returns correct endpoint path - - ## Classes and Methods - - - EpManageFabricConfigDeployPost.path - """ - with does_not_raise(): - instance = EpManageFabricConfigDeployPost() - instance.fabric_name = "MyFabric" - result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy" - - -def test_endpoints_api_v1_manage_fabrics_00130(): - """ - # Summary - - Verify EpManageFabricConfigDeployPost path with force_show_run - - ## Test - - - path includes forceShowRun in query string when set to True - - ## Classes and Methods - - - EpManageFabricConfigDeployPost.path - """ - with does_not_raise(): - instance = EpManageFabricConfigDeployPost() - instance.fabric_name = "MyFabric" - instance.endpoint_params.force_show_run = True - result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" - - -# ============================================================================= -# Test: EpManageFabricGet -# ============================================================================= - - -def test_endpoints_api_v1_manage_fabrics_00200(): - """ - # Summary - - Verify EpManageFabricGet basic instantiation - - ## Test - - - Instance can be created - - class_name is set correctly - - verb is GET - - ## Classes and Methods - - - EpManageFabricGet.__init__() - - EpManageFabricGet.class_name - - EpManageFabricGet.verb - """ - with does_not_raise(): - instance = EpManageFabricGet() - assert instance.class_name == "EpManageFabricGet" - assert instance.verb == HttpVerbEnum.GET - - -def test_endpoints_api_v1_manage_fabrics_00210(): - """ - # Summary - - Verify EpManageFabricGet raises ValueError when fabric_name is not set - - ## Test - - - Accessing path raises ValueError when fabric_name is None - - ## Classes and Methods - - - EpManageFabricGet.path - """ - instance = EpManageFabricGet() - with pytest.raises(ValueError): - 
instance.path - - -def test_endpoints_api_v1_manage_fabrics_00220(): - """ - # Summary - - Verify EpManageFabricGet path - - ## Test - - - path returns correct endpoint path - - ## Classes and Methods - - - EpManageFabricGet.path - """ - with does_not_raise(): - instance = EpManageFabricGet() - instance.fabric_name = "MyFabric" - result = instance.path - assert result == "/api/v1/manage/fabrics/MyFabric" From cacdeb2353ae41ce19175e75aec81bbfa771d9d5 Mon Sep 17 00:00:00 2001 From: Matt Tarkington Date: Thu, 9 Apr 2026 11:23:55 -0400 Subject: [PATCH 104/109] update actions --- .github/workflows/ansible-test.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index e98fd030..7c58908a 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -9,7 +9,6 @@ on: branches: - develop - main - - nd42_integration # schedule: # # * is a special character in YAML so you have to quote this string # - cron: '0 6 * * *' @@ -201,7 +200,7 @@ jobs: units: name: Unit Tests needs: - - ansible-test + - ansible-galaxy-importer runs-on: ubuntu-latest strategy: *ansible_strategy From 331fba74e03c34865ba4b20397f25e64f90eb17f Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 22:23:51 +0530 Subject: [PATCH 105/109] Pydantic V2 POS Field Fixes --- .../manage_switches/bootstrap_models.py | 27 +++++++++---------- .../models/manage_switches/config_models.py | 16 ++++------- .../manage_switches/discovery_models.py | 11 ++++---- .../manage_switches/preprovision_models.py | 9 ------- .../models/manage_switches/rma_models.py | 16 +++++------ .../manage_switches/switch_actions_models.py | 3 +-- .../manage_switches/switch_data_models.py | 5 ++-- 7 files changed, 34 insertions(+), 53 deletions(-) diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py index 22b3ba9e..7ff47ea4 100644 --- 
a/plugins/module_utils/models/manage_switches/bootstrap_models.py +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -90,8 +90,8 @@ class BootstrapCredentialModel(NDBaseModel): identifiers: ClassVar[List[str]] = [] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] - password: str = Field(..., description="Switch password to be set during bootstrap for admin user") - discovery_auth_protocol: SnmpV3AuthProtocol = Field(..., alias="discoveryAuthProtocol") + password: str = Field(description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(alias="discoveryAuthProtocol") use_new_credentials: bool = Field( default=False, alias="useNewCredentials", @@ -140,16 +140,15 @@ class BootstrapImportSpecificModel(NDBaseModel): identifiers: ClassVar[List[str]] = ["serial_number"] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" - hostname: str = Field(..., description="Hostname of the bootstrap switch") - ip: str = Field(..., description="IP address of the bootstrap switch") - serial_number: str = Field(..., alias="serialNumber", description="Serial number of the bootstrap switch") + hostname: str = Field(description="Hostname of the bootstrap switch") + ip: str = Field(description="IP address of the bootstrap switch") + serial_number: str = Field(alias="serialNumber", description="Serial number of the bootstrap switch") in_inventory: bool = Field( - ..., alias="inInventory", description="True if the bootstrap switch is in inventory", ) - public_key: str = Field(..., alias="publicKey", description="Public Key") - finger_print: str = Field(..., alias="fingerPrint", description="Fingerprint") + public_key: str = Field(alias="publicKey", description="Public Key") + finger_print: str = 
Field(alias="fingerPrint", description="Fingerprint") dhcp_bootstrap_ip: Optional[str] = Field( default=None, alias="dhcpBootstrapIp", @@ -184,17 +183,17 @@ class BootstrapImportSwitchModel(NDBaseModel): identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] - serial_number: str = Field(..., alias="serialNumber", description="Serial number of the bootstrap switch") + serial_number: str = Field(alias="serialNumber", description="Serial number of the bootstrap switch") model: Optional[str] = Field(default=None, description="Model of the bootstrap switch") software_version: Optional[str] = Field( default=None, alias="softwareVersion", description="Software version of the bootstrap switch", ) - hostname: str = Field(..., description="Hostname of the bootstrap switch") - ip: str = Field(..., description="IP address of the bootstrap switch") - password: str = Field(..., description="Switch password to be set during bootstrap for admin user") - discovery_auth_protocol: SnmpV3AuthProtocol = Field(..., alias="discoveryAuthProtocol") + hostname: str = Field(description="Hostname of the bootstrap switch") + ip: str = Field(description="IP address of the bootstrap switch") + password: str = Field(description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(alias="discoveryAuthProtocol") discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") remote_credential_store: RemoteCredentialStore = Field( @@ -275,7 +274,7 @@ class ImportBootstrapSwitchesRequestModel(NDBaseModel): identifiers: ClassVar[List[str]] = [] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" - switches: List[BootstrapImportSwitchModel] = 
Field(..., description="PowerOn Auto Provisioning switches") + switches: List[BootstrapImportSwitchModel] = Field(description="PowerOn Auto Provisioning switches") def to_payload(self) -> Dict[str, Any]: """Convert to API payload format.""" diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py index aa4e70cd..6e44571e 100644 --- a/plugins/module_utils/models/manage_switches/config_models.py +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -51,12 +51,11 @@ class ConfigDataModel(NDNestedModel): identifiers: ClassVar[List[str]] = [] models: List[str] = Field( - ..., alias="models", min_length=1, description="List of model of modules in switch to Bootstrap/Pre-provision/RMA", ) - gateway: str = Field(..., description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)") + gateway: str = Field(description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)") @field_validator("models", mode="before") @classmethod @@ -93,12 +92,11 @@ class POAPConfigModel(NDNestedModel): # Mandatory serial_number: str = Field( - ..., alias="serialNumber", min_length=1, description="Serial number of the physical switch to Bootstrap", ) - hostname: str = Field(..., description="Hostname for the switch during bootstrap") + hostname: str = Field(description="Hostname for the switch during bootstrap") # Optional discovery_username: Optional[str] = Field( @@ -149,16 +147,14 @@ class PreprovisionConfigModel(NDNestedModel): # Mandatory serial_number: str = Field( - ..., alias="serialNumber", min_length=1, description="Serial number of switch to Pre-provision", ) - model: str = Field(..., min_length=1, description="Model of switch to Pre-provision") - version: str = Field(..., min_length=1, description="Software version of switch to Pre-provision") - hostname: str = Field(..., description="Hostname for the switch during pre-provision") + model: str = Field(min_length=1, 
description="Model of switch to Pre-provision") + version: str = Field(min_length=1, description="Software version of switch to Pre-provision") + hostname: str = Field(description="Hostname for the switch during pre-provision") config_data: ConfigDataModel = Field( - ..., alias="configData", description=("Basic config data of switch to Pre-provision. " "'models' (list of module models) and 'gateway' (IP with mask) are mandatory."), ) @@ -215,7 +211,6 @@ class RMAConfigModel(NDNestedModel): # Required new_serial_number: str = Field( - ..., alias="newSerialNumber", min_length=1, description="Serial number of the replacement switch to bootstrap for RMA", @@ -278,7 +273,6 @@ class SwitchConfigModel(NDBaseModel): # Required fields seed_ip: str = Field( - ..., alias="seedIp", min_length=1, description="Seed IP address or DNS name of the switch", diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py index 7091fa21..2875ce46 100644 --- a/plugins/module_utils/models/manage_switches/discovery_models.py +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -44,7 +44,6 @@ class ShallowDiscoveryRequestModel(NDBaseModel): identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password"] seed_ip_collection: List[str] = Field( - ..., alias="seedIpCollection", min_length=1, description="Seed switch IP collection", @@ -110,10 +109,10 @@ class SwitchDiscoveryModel(NDBaseModel): identifiers: ClassVar[List[str]] = ["serial_number"] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" - hostname: str = Field(..., description="Switch host name") - ip: str = Field(..., description="Switch IPv4/v6 address") - serial_number: str = Field(..., alias="serialNumber", description="Switch serial number") - model: str = Field(..., 
description="Switch model") + hostname: str = Field(description="Switch host name") + ip: str = Field(description="Switch IPv4/v6 address") + serial_number: str = Field(alias="serialNumber", description="Switch serial number") + model: str = Field(description="Switch model") software_version: Optional[str] = Field(default=None, alias="softwareVersion", description="Switch software version") vdc_id: Optional[int] = Field( default=None, @@ -159,7 +158,7 @@ class AddSwitchesRequestModel(NDBaseModel): identifiers: ClassVar[List[str]] = [] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" exclude_from_diff: ClassVar[List[str]] = ["password"] - switches: List[SwitchDiscoveryModel] = Field(..., min_length=1, description="The list of switches to be imported") + switches: List[SwitchDiscoveryModel] = Field(min_length=1, description="The list of switches to be imported") platform_type: PlatformType = Field( default=PlatformType.NX_OS, alias="platformType", diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py index 435a1611..597eebab 100644 --- a/plugins/module_utils/models/manage_switches/preprovision_models.py +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -45,16 +45,13 @@ class PreProvisionSwitchModel(NDBaseModel): # --- preProvisionSpecific fields (required) --- serial_number: str = Field( - ..., alias="serialNumber", description="Serial number of the switch to pre-provision", ) hostname: str = Field( - ..., description="Hostname of the switch to pre-provision", ) ip: str = Field( - ..., description="IP address of the switch to pre-provision", ) @@ -72,16 +69,13 @@ class PreProvisionSwitchModel(NDBaseModel): # --- bootstrapBase fields (required) --- model: str = Field( - ..., description="Model of the switch to pre-provision", ) software_version: str = Field( - ..., 
alias="softwareVersion", description="Software version of the switch to pre-provision", ) gateway_ip_mask: str = Field( - ..., alias="gatewayIpMask", description="Gateway IP address with mask (e.g., 10.23.244.1/24)", ) @@ -104,11 +98,9 @@ class PreProvisionSwitchModel(NDBaseModel): # --- bootstrapCredential fields (required) --- password: str = Field( - ..., description="Switch password to be set during pre-provision for admin user", ) discovery_auth_protocol: SnmpV3AuthProtocol = Field( - ..., alias="discoveryAuthProtocol", description="SNMP authentication protocol for discovery", ) @@ -186,7 +178,6 @@ class PreProvisionSwitchesRequestModel(NDBaseModel): identifiers: ClassVar[List[str]] = [] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" switches: List[PreProvisionSwitchModel] = Field( - ..., description="PowerOn Auto Provisioning switches", ) diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py index 40b9f843..48b2d89b 100644 --- a/plugins/module_utils/models/manage_switches/rma_models.py +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -59,20 +59,20 @@ class RMASwitchModel(NDBaseModel): switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") # From bootstrapCredential - password: str = Field(..., description="Switch password to be set during bootstrap for admin user") - discovery_auth_protocol: SnmpV3AuthProtocol = Field(..., alias="discoveryAuthProtocol") + password: str = Field(description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(alias="discoveryAuthProtocol") discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") remote_credential_store: RemoteCredentialStore = 
Field(default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore") remote_credential_store_key: Optional[str] = Field(default=None, alias="remoteCredentialStoreKey") # From RMASpecific - hostname: str = Field(..., description="Hostname of the switch") - ip: str = Field(..., description="IP address of the switch") - new_switch_id: str = Field(..., alias="newSwitchId", description="SwitchId (serial number) of the replacement switch") - old_switch_id: str = Field(..., alias="oldSwitchId", description="SwitchId (serial number) of the switch being replaced") - public_key: str = Field(..., alias="publicKey", description="Public Key") - finger_print: str = Field(..., alias="fingerPrint", description="Fingerprint") + hostname: str = Field(description="Hostname of the switch") + ip: str = Field(description="IP address of the switch") + new_switch_id: str = Field(alias="newSwitchId", description="SwitchId (serial number) of the replacement switch") + old_switch_id: str = Field(alias="oldSwitchId", description="SwitchId (serial number) of the switch being replaced") + public_key: str = Field(alias="publicKey", description="Public Key") + finger_print: str = Field(alias="fingerPrint", description="Fingerprint") dhcp_bootstrap_ip: Optional[str] = Field(default=None, alias="dhcpBootstrapIp") seed_switch: bool = Field(default=False, alias="seedSwitch") data: Optional[Dict[str, Any]] = Field( diff --git a/plugins/module_utils/models/manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py index 0c79f988..608cc2ab 100644 --- a/plugins/module_utils/models/manage_switches/switch_actions_models.py +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -39,7 +39,6 @@ class SwitchCredentialsRequestModel(NDBaseModel): identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" switch_ids: List[str] = Field( - ..., alias="switchIds", min_length=1, 
description="List of switch serial numbers", @@ -94,7 +93,7 @@ class ChangeSwitchSerialNumberRequestModel(NDBaseModel): identifiers: ClassVar[List[str]] = ["new_switch_id"] identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" - new_switch_id: str = Field(..., alias="newSwitchId", description="New switchId") + new_switch_id: str = Field(alias="newSwitchId", description="New switchId") @field_validator("new_switch_id", mode="before") @classmethod diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py index cd8dd030..d6e8b4f7 100644 --- a/plugins/module_utils/models/manage_switches/switch_data_models.py +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -64,8 +64,8 @@ class VpcData(NDNestedModel): """ identifiers: ClassVar[List[str]] = [] - vpc_domain: int = Field(..., alias="vpcDomain", ge=1, le=1000, description="vPC domain ID") - peer_switch_id: str = Field(..., alias="peerSwitchId", description="vPC peer switch serial number") + vpc_domain: int = Field(alias="vpcDomain", ge=1, le=1000, description="vPC domain ID") + peer_switch_id: str = Field(alias="peerSwitchId", description="vPC peer switch serial number") consistent_status: Optional[bool] = Field( default=None, alias="consistentStatus", @@ -202,7 +202,6 @@ class SwitchDataModel(NDBaseModel): identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" exclude_from_diff: ClassVar[set] = {"system_up_time", "anomaly_level", "advisory_level", "alert_suspend"} switch_id: str = Field( - ..., alias="switchId", description="Serial number of Switch or Node Id of ACI switch", ) From 2a7be2c02eaa09e7d14a7dc3ae279d451c886a75 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Thu, 9 Apr 2026 22:26:02 +0530 Subject: [PATCH 106/109] Fix Lint --- plugins/modules/nd_manage_switches.py | 1 - 1 file changed, 1 
deletion(-) diff --git a/plugins/modules/nd_manage_switches.py b/plugins/modules/nd_manage_switches.py index 9446e077..1f6e196e 100644 --- a/plugins/modules/nd_manage_switches.py +++ b/plugins/modules/nd_manage_switches.py @@ -377,7 +377,6 @@ fabric: my-fabric state: gathered register: result - """ RETURN = r""" From f4fe618154db282598ffe4d9b9fed92ff5cd41db Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 10 Apr 2026 16:37:04 +0530 Subject: [PATCH 107/109] Handle NDRequests and Retry Timeouts --- .../manage_switches/nd_switch_resources.py | 49 +++++++++++++++-- plugins/module_utils/utils.py | 53 +++++++++++++++++++ 2 files changed, 97 insertions(+), 5 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index c727610c..7722c6af 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -67,6 +67,7 @@ RMAConfigModel, ) from ansible_collections.cisco.nd.plugins.module_utils.utils import ( + ApiDataChecker, FabricUtils, SwitchOperationError, ) @@ -743,16 +744,31 @@ def bulk_discover( # Extract discovered switches from response switches_data = [] + response_data: Dict[str, Any] = {} if response and isinstance(response, dict): if "DATA" in response and isinstance(response["DATA"], dict): - switches_data = response["DATA"].get("switches", []) + response_data = response["DATA"] + switches_data = response_data.get("switches", []) elif "body" in response and isinstance(response["body"], dict): - switches_data = response["body"].get("switches", []) + response_data = response["body"] + switches_data = response_data.get("switches", []) elif "switches" in response: switches_data = response.get("switches", []) log.debug("Extracted %s switches from discovery response", len(switches_data)) + ApiDataChecker.check(response_data, f"Switch discovery for {seed_ips}", log, nd.module.fail_json) + + # Fail early 
for any unreachable switches — before data touches models. + # The API returns status="notReachable" with an empty serialNumber and + # a top-level "warning" string explaining reachability requirements. + unreachable = [sw for sw in switches_data if isinstance(sw, dict) and sw.get("status", "").lower() == "notreachable"] + if unreachable: + api_warning = response_data.get("warning", "").strip() + msg = f"Switch discovery failed: {api_warning}" + log.error(msg) + nd.module.fail_json(msg=msg) + discovered_results: Dict[str, Dict[str, Any]] = {} for discovered in switches_data: if not isinstance(discovered, dict): @@ -977,6 +993,9 @@ def bulk_add( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check( + response.get("DATA", {}), f"Bulk add switches to fabric '{self.ctx.fabric}' ({', '.join(serial_numbers)})", log, nd.module.fail_json + ) results.action = "create" results.operation_type = OperationType.CREATE @@ -1054,6 +1073,9 @@ def bulk_delete( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check( + response.get("DATA", {}), f"Bulk delete switches from fabric '{self.ctx.fabric}' ({serial_numbers})", log, nd.module.fail_json + ) results.action = "delete" results.operation_type = OperationType.DELETE @@ -1123,6 +1145,7 @@ def bulk_save_credentials( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"Save credentials for switches {serial_numbers}", log, nd.module.fail_json) results.action = "save_credentials" results.operation_type = OperationType.UPDATE @@ -1184,6 +1207,7 @@ def bulk_update_roles( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"Update switch roles in fabric '{self.ctx.fabric}'", log, nd.module.fail_json) results.action = "update_role" results.operation_type = OperationType.UPDATE @@ -1687,6 +1711,7 @@ 
def _import_bootstrap_switches( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"importBootstrap for {[m.serial_number for m in models]}", log, nd.module.fail_json) results.action = "bootstrap" results.operation_type = OperationType.CREATE @@ -1797,6 +1822,7 @@ def _preprovision_switches( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"preProvision for {[m.serial_number for m in models]}", log, nd.module.fail_json) results.action = "preprovision" results.operation_type = OperationType.CREATE @@ -1918,6 +1944,7 @@ def _handle_poap_swap( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"changeSwitchSerialNumber {old_serial} → {new_serial}", log, nd.module.fail_json) results.action = "swap_serial" results.operation_type = OperationType.UPDATE @@ -2352,6 +2379,7 @@ def _provision_rma_switch( response = nd.rest_send.response_current result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"RMA provision {rma_model.old_switch_id} → {rma_model.new_switch_id}", log, nd.module.fail_json) results.action = "rma" results.operation_type = OperationType.CREATE @@ -2413,6 +2441,11 @@ def __init__( # Shared context for service classes config_actions = self.module.params.get("config_actions") or {} + + # Set retry count for Rest Send API calls. + self.request_retry_count: int = 1 + nd._get_rest_send().timeout = self.request_retry_count + self.ctx = SwitchServiceContext( nd=nd, results=results, @@ -3511,21 +3544,27 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: # GETs must run against the real API even in check_mode so that the # before/after diff reflects actual controller state. 
- rest_send = self.nd.rest_send - in_check_mode = rest_send.check_mode + in_check_mode = self.nd.module.check_mode + rest_send = self.nd._get_rest_send() if in_check_mode: rest_send.save_settings() rest_send.check_mode = False try: result = self.nd.request(path=endpoint.path, verb=endpoint.verb) except Exception as e: - msg = f"Failed to query switches from " f"fabric '{self.fabric}': {e}" + msg = f"Failed to query switches from fabric '{self.fabric}': {e}" self.log.error(msg) self.nd.module.fail_json(msg=msg) finally: if in_check_mode: rest_send.restore_settings() + # nd.request() returns response["DATA"] directly. For a 404, the + # controller embeds the error as {"code": 404, "message": "Fabric not found"} + # inside DATA. RestSend treats GET 404 as success=True/found=False so no + # exception is raised — detect it here from the returned data itself. + ApiDataChecker.check(result, f"Query switches from fabric '{self.fabric}'", self.log, self.nd.module.fail_json) + if isinstance(result, list): switches = result elif isinstance(result, dict): diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 2bf08d4e..532d653e 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -111,6 +111,53 @@ class SwitchOperationError(Exception): """Raised when a switch operation fails.""" +# ========================================================================= +# API Response Validation +# ========================================================================= + + +class ApiDataChecker: + """Detect controller-embedded errors in API response DATA payloads. + + The Nexus Dashboard API signals certain errors by embedding an error + object inside ``DATA`` as ``{"code": , "message": ""}`` even + when the transport-level result is marked successful. Any payload dict + that contains a ``"code"`` key is treated as an error; the absence of + ``"code"`` means the payload is a genuine data body. 
+ """ + + @staticmethod + def check( + data: Any, + context: str, + log: logging.Logger, + fail_callback=None, + ) -> None: + """Fail or raise if the response DATA contains an embedded error code. + + Args: + data: Value returned by ``nd.request()`` or extracted from + ``response_current["DATA"]``. + context: Human-readable description of the operation. + log: Logger instance. + fail_callback: Optional callable (e.g. ``module.fail_json``) that + accepts a ``msg`` keyword argument. When provided + it is called on error instead of raising + ``SwitchOperationError``. + """ + if isinstance(data, dict) and "code" in data: + error_msg = data.get("message", "Unknown error") + msg = ( + f"{context} failed — controller returned error: " + f"{error_msg} (code={data['code']})" + ) + log.error(msg) + if fail_callback is not None: + fail_callback(msg=msg) + else: + raise SwitchOperationError(msg) + + # ========================================================================= # Fabric Utilities # ========================================================================= @@ -244,8 +291,11 @@ def deploy_switches(self, serial_numbers: List[str]) -> Dict[str, Any]: verb=self.ep_switch_deploy.verb, data={"switchIds": serial_numbers}, ) + ApiDataChecker.check(response, f"Switch-level deploy for fabric '{self.fabric}'", self.log) self.log.info("Switch-level deploy completed for fabric: %s", self.fabric) return response + except SwitchOperationError: + raise except Exception as e: self.log.error("Switch-level deploy failed for fabric %s: %s", self.fabric, e) raise SwitchOperationError(f"Switch-level deploy failed for fabric {self.fabric}: {e}") from e @@ -281,8 +331,11 @@ def _request_endpoint(self, endpoint, action: str = "Request") -> Dict[str, Any] self.log.info("%s for fabric: %s", action, self.fabric) try: response = self.nd.request(endpoint.path, verb=endpoint.verb) + ApiDataChecker.check(response, f"{action} for fabric '{self.fabric}'", self.log) self.log.info("%s completed for 
fabric: %s", action, self.fabric) return response + except SwitchOperationError: + raise except Exception as e: self.log.error("%s failed for fabric %s: %s", action, self.fabric, e) raise SwitchOperationError(f"{action} failed for fabric {self.fabric}: {e}") from e From 0ea6d216420ba36bc0268deb3b0b75e5ca87775f Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 10 Apr 2026 18:07:50 +0530 Subject: [PATCH 108/109] Fix Rest Send Initialization --- plugins/module_utils/manage_switches/nd_switch_resources.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 7722c6af..81921735 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2444,7 +2444,7 @@ def __init__( # Set retry count for Rest Send API calls. self.request_retry_count: int = 1 - nd._get_rest_send().timeout = self.request_retry_count + self.nd._get_rest_send().timeout = self.request_retry_count self.ctx = SwitchServiceContext( nd=nd, @@ -3545,7 +3545,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: # GETs must run against the real API even in check_mode so that the # before/after diff reflects actual controller state. 
in_check_mode = self.nd.module.check_mode - rest_send = self.nd._get_rest_send() + rest_send = self.nd.rest_send if in_check_mode: rest_send.save_settings() rest_send.check_mode = False From 3ae57ed79bcd4bd418f89c03935e18d72bbc5ce3 Mon Sep 17 00:00:00 2001 From: AKDRG Date: Fri, 10 Apr 2026 23:49:45 +0530 Subject: [PATCH 109/109] Fix for timeout + check_mode --- .../manage_switches/nd_switch_resources.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py index 81921735..2805276e 100644 --- a/plugins/module_utils/manage_switches/nd_switch_resources.py +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -2442,9 +2442,12 @@ def __init__( # Shared context for service classes config_actions = self.module.params.get("config_actions") or {} - # Set retry count for Rest Send API calls. + # Configure RestSend once: fix timeout to request_retry_count so all + # API calls use a single retry iteration instead of the default 300s loop. + # check_mode is NOT overridden globally — read-only calls that must reach + # the controller override it locally via save_settings()/restore_settings(). self.request_retry_count: int = 1 - self.nd._get_rest_send().timeout = self.request_retry_count + self.nd.rest_send_timeout = self.request_retry_count self.ctx = SwitchServiceContext( nd=nd, @@ -3542,13 +3545,12 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: self.log.debug("Querying all switches with endpoint: %s", endpoint.path) self.log.debug("Query verb: %s", endpoint.verb) - # GETs must run against the real API even in check_mode so that the - # before/after diff reflects actual controller state. + # GETs must reach the real controller even when Ansible runs with --check. 
+ # Temporarily override check_mode to False so RestSend sends the real + # request instead of returning a simulated response, then restore it. in_check_mode = self.nd.module.check_mode - rest_send = self.nd.rest_send if in_check_mode: - rest_send.save_settings() - rest_send.check_mode = False + self.nd.rest_send_check_mode = False try: result = self.nd.request(path=endpoint.path, verb=endpoint.verb) except Exception as e: @@ -3557,7 +3559,7 @@ def _query_all_switches(self) -> List[Dict[str, Any]]: self.nd.module.fail_json(msg=msg) finally: if in_check_mode: - rest_send.restore_settings() + self.nd.rest_send_check_mode = True # nd.request() returns response["DATA"] directly. For a 404, the # controller embeds the error as {"code": 404, "message": "Fabric not found"}