diff --git a/playbooks/assurance_device_health_score_settings_playbook_config_generator.yml b/playbooks/assurance_device_health_score_settings_playbook_config_generator.yml index 6b683f1b43..f53101367a 100644 --- a/playbooks/assurance_device_health_score_settings_playbook_config_generator.yml +++ b/playbooks/assurance_device_health_score_settings_playbook_config_generator.yml @@ -23,9 +23,11 @@ dnac_task_poll_interval: 1 state: gathered file_path: "generated_files/device_health_score_settings_playbook1.yml" - file_mode: "overwrite" + file_mode: "append" config: component_specific_filters: components_list: ["device_health_score_settings"] device_health_score_settings: - device_families: ["ROUTER"] + - device_families: ["ROUTER", "WIRELESS_CONTROLLER"] + - device_families: ["UNIFIED_AP", "SWITCH_AND_HUB"] + - device_families: ["WIRELESS_CONTROLLER"] diff --git a/playbooks/inventory_playbook_config_generator.yml b/playbooks/inventory_playbook_config_generator.yml index ac23223ab1..f49022d28a 100644 --- a/playbooks/inventory_playbook_config_generator.yml +++ b/playbooks/inventory_playbook_config_generator.yml @@ -14,6 +14,7 @@ # 4. Three-Document Output: Separate YAML documents for each component # 5. HTTP Fields Support: http_username, http_password, http_port, http_secure # 6. Smart File Creation: No file created if no data matches filters +# 7. Preferred Syntax: file_path and file_mode are top-level module arguments # # =================================================================================================== @@ -44,10 +45,8 @@ dnac_log: true dnac_log_level: INFO state: gathered - config: - generate_all_configurations: true - file_mode: "overwrite" - file_path: "inventory_all_devices_complete.yml" + file_path: "inventory_all_devices_complete.yml" + file_mode: "overwrite" tags: [scenario1, complete_discovery] # =================================================================================== @@ -69,9 +68,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_specific_ips.yml" + file_mode: "overwrite" config: - file_path: "inventory_specific_ips.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -100,9 +99,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_device_details_only.yml" + file_mode: "overwrite" config: - file_path: "inventory_device_details_only.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -130,9 +129,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_provision_only.yml" + file_mode: "overwrite" config: - file_path: "inventory_provision_only.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["provision_device"] provision_device: @@ -158,9 +157,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_interface_only.yml" + file_mode: "overwrite" config: - file_path: "inventory_interface_only.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "205.1.2.67" @@ -191,9 +190,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_independent_filters.yml" + file_mode: "overwrite" config: - file_path: "inventory_independent_filters.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details", "provision_device", "interface_details"] device_details: @@ -221,9 +220,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_global_component_filters.yml" + file_mode: "overwrite" config: - file_path: "inventory_global_component_filters.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -254,9 +253,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_access_role.yml" + file_mode: "overwrite" config: - file_path: "inventory_access_role.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details"] device_details: @@ -282,9 +281,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_multi_role.yml" + file_mode: "overwrite" config: - file_path: "inventory_multi_role.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details"] device_details: @@ -310,9 +309,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_device_provision.yml" + file_mode: "overwrite" config: - file_path: "inventory_device_provision.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -340,9 +339,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_site_bangalore_bld1.yml" + file_mode: "overwrite" config: - file_path: "inventory_site_bangalore_bld1.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["provision_device"] provision_device: @@ -362,9 +361,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_site_bangalore_bld2.yml" + file_mode: "overwrite" config: - file_path: "inventory_site_bangalore_bld2.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["provision_device"] provision_device: @@ -396,7 +395,6 @@ - "172.27.248.223" component_specific_filters: components_list: ["device_details"] - file_mode: "overwrite" tags: [scenario12, default_path] # =================================================================================== @@ -418,9 +416,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_interface_vlan100_only.yml" + file_mode: "overwrite" config: - file_path: "inventory_interface_vlan100_only.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["interface_details"] interface_details: @@ -446,9 +444,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_interface_multi_filter.yml" + file_mode: "overwrite" config: - file_path: "inventory_interface_multi_filter.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["interface_details"] interface_details: @@ -475,9 +473,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_ip_interface_filter.yml" + file_mode: "overwrite" config: - file_path: "inventory_ip_interface_filter.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "100.1.1.61" @@ -508,9 +506,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_device_filtered_interfaces.yml" + file_mode: "overwrite" config: - file_path: "inventory_device_filtered_interfaces.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -540,9 +538,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_all_filtered_interfaces.yml" + file_mode: "overwrite" config: - file_path: "inventory_all_filtered_interfaces.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -575,9 +573,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_interface_no_match.yml" + file_mode: "overwrite" config: - file_path: "inventory_interface_no_match.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -608,9 +606,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_gigabitethernet_only.yml" + file_mode: "overwrite" config: - file_path: "inventory_gigabitethernet_only.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["interface_details"] interface_details: @@ -636,9 +634,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_access_devices_interface_filter.yml" + file_mode: "overwrite" config: - file_path: "inventory_access_devices_interface_filter.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details", "interface_details"] device_details: @@ -665,9 +663,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_udf_only.yml" + file_mode: "overwrite" config: - file_path: "inventory_udf_only.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["user_defined_fields"] tags: [scenario21, udf_only] @@ -692,9 +690,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_all_components_with_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_all_components_with_udf.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details", "provision_device", "interface_details", "user_defined_fields"] tags: [scenario22, all_components_udf] @@ -718,9 +716,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_device_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_device_udf.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details", "user_defined_fields"] tags: [scenario23, device_udf] @@ -745,9 +743,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_ip_filter_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_ip_filter_udf.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "206.1.2.3" @@ -776,9 +774,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_provision_site_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_provision_site_udf.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["provision_device", "user_defined_fields"] provision_device: @@ -804,9 +802,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_interface_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_interface_udf.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["interface_details", "user_defined_fields"] tags: [scenario26, interface_udf] @@ -830,9 +828,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_interface_name_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_interface_name_udf.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["interface_details", "user_defined_fields"] interface_details: @@ -858,9 +856,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_access_role_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_access_role_udf.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details", "user_defined_fields"] device_details: @@ -886,9 +884,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_complex_multi_filter_udf.yml" + file_mode: "overwrite" config: - file_path: "inventory_complex_multi_filter_udf.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "172.27.248.223" @@ -924,10 +922,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_udf_audit_complete.yml" + file_mode: "overwrite" config: - generate_all_configurations: true - file_mode: "overwrite" - file_path: "inventory_udf_audit_complete.yml" component_specific_filters: components_list: ["user_defined_fields"] tags: [scenario30, udf_audit] @@ -951,9 +948,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_udf_name_filter.yml" + file_mode: "overwrite" config: - file_path: "inventory_udf_name_filter.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["user_defined_fields"] user_defined_fields: @@ -980,9 +977,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_udf_value_filter.yml" + file_mode: "overwrite" config: - file_path: "inventory_udf_value_filter.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["user_defined_fields"] user_defined_fields: @@ -1008,9 +1005,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_ip_udf_name_filter.yml" + file_mode: "overwrite" config: - file_path: "inventory_ip_udf_name_filter.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "206.1.2.4" @@ -1041,9 +1038,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_device_udf_name_filter.yml" + file_mode: "overwrite" config: - file_path: "inventory_device_udf_name_filter.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["device_details", "user_defined_fields"] user_defined_fields: @@ -1070,9 +1067,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_all_udf_filtered.yml" + file_mode: "overwrite" config: - file_path: "inventory_all_udf_filtered.yml" - file_mode: "overwrite" global_filters: ip_address_list: - "206.1.2.4" @@ -1104,9 +1101,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_udf_name_filter_single.yml" + file_mode: "overwrite" config: - file_path: "inventory_udf_name_filter_single.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["user_defined_fields"] user_defined_fields: @@ -1132,9 +1129,9 @@ dnac_log: true dnac_log_level: INFO state: gathered + file_path: "inventory_udf_value_filter_single.yml" + file_mode: "overwrite" config: - file_path: "inventory_udf_value_filter_single.yml" - file_mode: "overwrite" component_specific_filters: components_list: ["user_defined_fields"] user_defined_fields: diff --git a/playbooks/sda_extranet_policies_workflow_manager.yml b/playbooks/sda_extranet_policies_workflow_manager.yml index 2c2352db29..6ad02120f2 100644 --- a/playbooks/sda_extranet_policies_workflow_manager.yml +++ b/playbooks/sda_extranet_policies_workflow_manager.yml @@ -22,7 +22,7 @@ tasks: - name: Create Extranet Policy - cisco.dnac.network_compliance_workflow_manager: + cisco.dnac.sda_extranet_policies_workflow_manager: <<: *dnac_login state: merged config: @@ -32,7 +32,7 @@ - name: Create Extranet Policy with Fabric Site(s) specified - cisco.dnac.network_compliance_workflow_manager: + cisco.dnac.sda_extranet_policies_workflow_manager: <<: *dnac_login state: merged config: @@ -43,7 +43,7 @@ - name: Update existing Extranet Policy - cisco.dnac.network_compliance_workflow_manager: + cisco.dnac.sda_extranet_policies_workflow_manager: <<: *dnac_login state: merged config: @@ -52,7 +52,7 @@ subscriber_virtual_networks: ["VN_2", "VN_4"] - name: Update existing Extranet Policy with Fabric Site(s) specified - cisco.dnac.network_compliance_workflow_manager: + cisco.dnac.sda_extranet_policies_workflow_manager: <<: *dnac_login state: merged config: @@ -63,7 +63,7 @@ - name: Delete Extranet Policy - cisco.dnac.network_compliance_workflow_manager: + cisco.dnac.sda_extranet_policies_workflow_manager: <<: *dnac_login state: deleted config: diff --git a/plugins/module_utils/brownfield_helper.py b/plugins/module_utils/brownfield_helper.py index a0b4401630..8a2cf18cdd 100644 --- a/plugins/module_utils/brownfield_helper.py +++ b/plugins/module_utils/brownfield_helper.py @@ -6,7 +6,6 @@ from __future__ import absolute_import, division, print_function import datetime -import hashlib import os from ansible_collections.cisco.dnac.plugins.module_utils.validation import ( validate_list_of_dicts, @@ -301,6 +300,10 @@ def validate_component_specific_filters(self, component_specific_filters): for component_name, component_filters in component_specific_filters.items(): if component_name == "components_list": + self.log( + "Skipping 'components_list' key — not a component entry.", + "DEBUG", + ) continue # Check if component exists @@ -310,6 +313,13 @@ def validate_component_specific_filters(self, component_specific_filters): ) continue + # Component Filters must be list + if not isinstance(component_filters, list): + invalid_filters.append( + "Component '{0}' filters must be a list".format(component_name) + ) + continue + # Get valid filters for this component valid_filters_for_component = network_elements[component_name].get( "filters", {} @@ -327,9 +337,37 @@ def validate_component_specific_filters(self, component_specific_filters): ) continue - # Enhanced validation for new format (dict with rules) - if isinstance(component_filters, dict): - for filter_name, filter_value in component_filters.items(): + # Validating component filters entries + self.log( + "Validating {0} filter entry/entries for component '{1}'.".format( + len(component_filters), component_name + ), + "DEBUG", + ) + + for index, component_filter in enumerate(component_filters, start=1): + self.log( + "Validating filter entry {0}/{1} for component '{2}': {3}".format( + index, len(component_filters), component_name, component_filter + ), + "DEBUG", + ) + + if not isinstance(component_filter, dict): + invalid_filters.append( + "Component '{0}' filter entry must be a dict, got {1}".format( + component_name, type(component_filter).__name__ + ) + ) + continue + + for filter_name, filter_value in component_filter.items(): + self.log( + "Processing filter '{0}' in entry {1}/{2} for component '{3}': value={4}".format( + filter_name, index, len(component_filters), component_name, filter_value + ), + "DEBUG", + ) if filter_name not in valid_filters_for_component: invalid_filters.append( "Filter '{0}' not valid for component '{1}'".format( @@ -341,6 +379,13 @@ def validate_component_specific_filters(self, component_specific_filters): filter_spec = valid_filters_for_component[filter_name] # Validate type expected_type = filter_spec.get("type", "str") + self.log( + "Validating filter '{0}' for component '{1}': expected_type='{2}', value={3}".format( + filter_name, component_name, expected_type, filter_value + ), + "DEBUG", + ) + if expected_type == "list" and not isinstance(filter_value, list): invalid_filters.append( "Component '{0}' filter '{1}' must be a list".format( @@ -369,6 +414,13 @@ def validate_component_specific_filters(self, component_specific_filters): ) ) continue + elif expected_type == "bool" and not isinstance(filter_value, bool): + invalid_filters.append( + "Component '{0}' filter '{1}' must be a boolean".format( + component_name, filter_name + ) + ) + continue # ADD: Direct range validation for integers if expected_type == "int" and "range" in filter_spec: @@ -389,6 +441,12 @@ def validate_component_specific_filters(self, component_specific_filters): # Validate patterns for string filters if expected_type == "str" and "pattern" in filter_spec: pattern = filter_spec["pattern"] + self.log( + "Checking pattern for component '{0}' filter '{1}': value='{2}', pattern='{3}'.".format( + component_name, filter_name, filter_value, pattern + ), + "DEBUG", + ) if isinstance(filter_value, str) and not re.match( pattern, filter_value ): @@ -402,6 +460,12 @@ def validate_component_specific_filters(self, component_specific_filters): # Validate choices for lists if expected_type == "list" and "choices" in filter_spec: valid_choices = filter_spec["choices"] + self.log( + "Checking list choices for component '{0}' filter '{1}': valid_choices={2}.".format( + component_name, filter_name, valid_choices + ), + "DEBUG", + ) invalid_choices = [ item for item in filter_value if item not in valid_choices ] @@ -419,6 +483,13 @@ def validate_component_specific_filters(self, component_specific_filters): if expected_type == "list" and filter_value: element_type = filter_spec.get("elements", "str") range_values = filter_spec.get("range") + self.log( + "Validating list elements for component '{0}' filter '{1}': " + "element_type='{2}', element_count={3}.".format( + component_name, filter_name, element_type, len(filter_value) + ), + "DEBUG", + ) for i, element in enumerate(filter_value): # ADD: Range validation for list elements @@ -443,6 +514,12 @@ def validate_component_specific_filters(self, component_specific_filters): # Validate choices for strings if expected_type == "str" and "choices" in filter_spec: valid_choices = filter_spec["choices"] + self.log( + "Checking string choices for component '{0}' filter '{1}': value='{2}', valid_choices={3}.".format( + component_name, filter_name, filter_value, valid_choices + ), + "DEBUG", + ) if filter_value not in valid_choices: invalid_filters.append( "Component '{0}' filter '{1}' has invalid value: '{2}'. Valid choices: {3}".format( @@ -456,6 +533,12 @@ def validate_component_specific_filters(self, component_specific_filters): # Validate nested dict options and apply dynamic validation if expected_type == "dict" and "options" in filter_spec: nested_options = filter_spec["options"] + self.log( + "Validating nested dict options for component '{0}' filter '{1}': keys={2}.".format( + component_name, filter_name, list(filter_value.keys()) + ), + "DEBUG", + ) for nested_key, nested_value in filter_value.items(): if nested_key not in nested_options: invalid_filters.append( @@ -467,6 +550,13 @@ def validate_component_specific_filters(self, component_specific_filters): nested_spec = nested_options[nested_key] nested_type = nested_spec.get("type", "str") + self.log( + "Validating nested key '{0}' in component '{1}' filter '{2}': " + "expected_type='{3}', value={4}.".format( + nested_key, component_name, filter_name, nested_type, nested_value + ), + "DEBUG", + ) if nested_type == "list" and not isinstance( nested_value, list @@ -497,6 +587,13 @@ def validate_component_specific_filters(self, component_specific_filters): if nested_type == "int" and "range" in nested_spec: range_values = nested_spec["range"] min_val, max_val = range_values[0], range_values[1] + self.log( + "Checking range for nested key '{0}' in component '{1}' filter '{2}': " + "value={3}, range=[{4}, {5}].".format( + nested_key, component_name, filter_name, nested_value, min_val, max_val + ), + "DEBUG", + ) if not (min_val <= nested_value <= max_val): invalid_filters.append( "Component '{0}' filter '{1}.{2}' value {3} is outside valid range [{4}, {5}]".format( @@ -515,6 +612,13 @@ def validate_component_specific_filters(self, component_specific_filters): nested_value, str ): pattern = nested_spec["pattern"] + self.log( + "Checking pattern for nested key '{0}' in component '{1}' filter '{2}': " + "value='{3}', pattern='{4}'.".format( + nested_key, component_name, filter_name, nested_value, pattern + ), + "DEBUG", + ) if not re.match(pattern, nested_value): invalid_filters.append( "Component '{0}' filter '{1}.{2}' does not match required pattern".format( @@ -529,7 +633,7 @@ def validate_component_specific_filters(self, component_specific_filters): self.fail_and_exit(self.msg) self.log( - "All component-specific filters for module '{0}' are valid.".format( + "Successfully validated all component-specific filters for module '{0}'.".format( self.module_name ), "INFO", @@ -1707,19 +1811,31 @@ def _get_playbook_path(self): def _get_last_yaml_document(self, file_path): """ - Extract the last YAML document's data from a multi-document YAML file. - Uses the '---' YAML document separator to split documents and parses only - the last one. Header comment lines are automatically ignored by the YAML parser. + Extract the effective YAML data from a file that uses + last-key-wins semantics (single-document, no --- + separators between appended blocks). + + Uses yaml.safe_load which, for duplicate top-level keys + such as config:, returns the value from the last + occurrence. This matches the append-mode file format + where new config blocks are appended without --- + separators. + + Note: For files containing multiple YAML documents + (separated with ---), yaml.safe_load may fail because it + expects a single document. In that case, this function + returns None (exception is handled). Args: - file_path (str): Path to the multi-document YAML file. + file_path (str): Path to the YAML file. Returns: dict or None: The parsed data from the last YAML document, or None if - the file is empty, doesn't exist, or parsing fails. + the file is empty, doesn't exist, does not exist, or parsing fails. """ self.log( - "Attempting to extract last YAML document from '{0}'".format(file_path), + "Reading file '{0}' to extract the last YAML " + "document.".format(file_path), "DEBUG", ) @@ -1731,8 +1847,8 @@ def _get_last_yaml_document(self, file_path): ) return None - with open(file_path, "r") as f: - content = f.read() + with open(file_path, "r") as yaml_file: + content = yaml_file.read() self.log( "Successfully read file '{0}', content length: {1} characters".format( @@ -1748,63 +1864,45 @@ def _get_last_yaml_document(self, file_path): ) return None - # Split by YAML document separator and take the last non-empty segment - documents = content.split("\n---\n") + # ----------------------------------------------- + # yaml.safe_load returns last-key-wins for + # duplicate top-level keys. Append mode omits + # --- separators so the file stays as a single + # document - safe_load returns the last config. + # ----------------------------------------------- self.log( - "File '{0}' split into {1} YAML document segment(s)".format(file_path, len(documents)), + "Parsing file '{0}' with safe_load (last-key-wins) to extract " + "last config block.".format(file_path), "DEBUG", ) - last_segment = None - total_segments = len(documents) - - for segment_number in range(total_segments, 0, -1): - segment = documents[segment_number - 1] - stripped = segment.strip() - - self.log( - "Checking segment {0}/{1}, " - "empty: {2}, length: {3} characters".format( - segment_number, total_segments, - not bool(stripped), len(stripped) - ), - "DEBUG", - ) - - if stripped: - self.log( - "Found last non-empty YAML segment at position {0}".format(segment_number), - "DEBUG", - ) - last_segment = stripped - break - - self.log( - "Segment {0} is empty, continuing to next".format(segment_number), - "DEBUG", - ) - - if last_segment is None: - self.log( - "No non-empty YAML segment found in '{0}', returning None".format(file_path), - "DEBUG", - ) - return None + last_doc = yaml.safe_load(content) self.log( - "Parsing last YAML segment from '{0}'".format(file_path), + "Extracted last YAML document from '{0}', content: {1}" + .format(file_path, last_doc), "DEBUG", ) - last_doc = yaml.safe_load(last_segment) + return last_doc + except yaml.YAMLError as yaml_err: self.log( - "Extracted last YAML document from '{0}', content: {1}" - .format(file_path, last_doc), - "DEBUG", + "YAML parsing error while reading '{0}': {1}".format( + file_path, str(yaml_err) + ), + "ERROR", ) + return None - return last_doc + except (IOError, OSError) as io_err: + self.log( + "File read error for '{0}': {1}".format( + file_path, str(io_err) + ), + "ERROR", + ) + return None except Exception as e: self.log( @@ -1815,76 +1913,44 @@ def _get_last_yaml_document(self, file_path): ) return None - def _compute_content_hash(self, content): + def strip_comment_lines(self, content): """ - Compute a SHA256 hash of file content after stripping volatile header - fields (timestamp, playbook path) so that two files generated from the - same config at different times produce an identical hash. + Return content lines with all comment lines removed. - Uses streaming hash updates per line instead of building a full - normalized string in memory, which is significantly faster and more - memory-efficient for large configuration files. + Strips every line whose first non-whitespace character is '#'. + This removes generated header blocks as well as any other + comment lines so that two YAML files differing only in + comments compare as equal during the idempotency check. Args: - content (str): Raw file content including header comments. + content (str): Raw file content including header + comments and YAML payload. Returns: - str: Hex-encoded SHA256 digest of the normalized content. + list: Content lines excluding comment lines. """ self.log( - "Starting SHA256 content hash computation. " - "Input content length: {0} characters.".format(len(content)), + "Stripping comment lines from content of " + "length {0} characters.".format(len(content)), "DEBUG", ) lines = content.splitlines() - total_lines = len(lines) - - self.log( - "Content split into {0} lines for hash processing.".format(total_lines), - "DEBUG", - ) - - hasher = hashlib.sha256() - skipped_lines = 0 - hashed_lines = 0 - - for index, line in enumerate(lines, start=1): - stripped = line.strip() - - self.log( - "Processing line {0}/{1}: '{2}'".format(index, total_lines, stripped), - "DEBUG", - ) - - # Skip lines that change every run - if stripped.startswith("# Generated on") or stripped.startswith("# Generated from"): - self.log( - "Line {0}: Skipping volatile header line: '{1}'".format(index, stripped), - "DEBUG", - ) - skipped_lines += 1 - continue - - hasher.update(line.encode("utf-8")) - hasher.update(b"\n") - hashed_lines += 1 - - self.log( - "Line {0}: Hashed successfully.".format(index), - "DEBUG", - ) - - digest = hasher.hexdigest() + filtered_lines = [ + line for line in lines if not line.strip().startswith("#") + ] self.log( - "SHA256 content hash computation completed. " - "Total lines: {0}, Lines hashed: {1}, Volatile lines skipped: {2}, " - "Computed hash: {3}".format(total_lines, hashed_lines, skipped_lines, digest), + "Stripped comment lines from content. " + "Total lines: {0}, retained lines: {1}, " + "removed comment lines: {2}".format( + len(lines), + len(filtered_lines), + len(lines) - len(filtered_lines), + ), "DEBUG", ) - - return digest + return filtered_lines def write_dict_to_yaml( self, @@ -1895,24 +1961,33 @@ def write_dict_to_yaml( notes=None, ): """ - Converts a dictionary to YAML format and writes it to a specified file path. - Supports idempotent behavior: skips writing if the content is unchanged. + Converts a dictionary to YAML format and writes it to + a specified file path. Supports idempotent behavior: + skips writing if the YAML payload is unchanged. + + For overwrite mode: compares the full rendered YAML + content while ignoring all comment lines (lines + starting with '#'). + For append mode: compares the data payload against the + last config block in the file using yaml.safe_load + last-key-wins semantics. - For overwrite mode: compares the full file content (excluding volatile header - fields like timestamp) against the new content. - For append mode: compares the data payload against the last YAML document - already present in the file. + In append mode the --- document separator is omitted + so the file remains a single YAML document where the + last config: key wins. Args: - data_dict (dict): The dictionary to convert to YAML format. - file_path (str): The path where the YAML file will be written. - file_mode (str): File write mode. Supported values: "overwrite", "append". - notes (list, optional): A list of additional comment lines to append after the - standard header information. Each string in the list will be - prefixed with "# " to maintain comment formatting. Defaults to None. - dumper: The YAML dumper class to use for serialization (default is OrderedDumper). + data_dict (dict): The dictionary to convert to YAML. + file_path (str): The path where the YAML file will + be written. + file_mode (str): 'overwrite' or 'append'. + dumper: The YAML dumper class (default OrderedDumper). + notes (list, optional): Additional comment lines to + append after the standard header. + Returns: - bool: True if the file was written (content changed), False if skipped (no change). + bool: True if written (content changed), False if + skipped (no change). """ self.log( @@ -1937,7 +2012,24 @@ def write_dict_to_yaml( self.fail_and_exit(self.msg) header_comments = self.add_header_comments(notes=notes) - yaml_content = header_comments + "\n---\n" + yaml_content + + # Use --- separator only for overwrite mode or when file doesn't exist yet. + # In append mode, skip --- so the file remains a single YAML document + # where the last config: key wins (history-style structure). + if file_mode == "append" and os.path.isfile(file_path) and os.path.getsize(file_path) > 0: + self.log( + "Append mode with non-empty existing file '{0}'. Building YAML " + "content without document separator.".format(file_path), + "DEBUG", + ) + yaml_content = "\n" + header_comments + "\n" + yaml_content + else: + self.log( + "Using standard YAML content format for '{0}' with header comments " + "and document separator.".format(file_path), + "DEBUG", + ) + yaml_content = header_comments + "\n---\n" + yaml_content self.log("Dictionary successfully converted to YAML format.", "DEBUG") @@ -1945,7 +2037,8 @@ def write_dict_to_yaml( if file_mode == "overwrite" and os.path.isfile(file_path): self.log( "Overwrite mode: Existing file found at '{0}'. " - "Starting idempotency check by comparing content hashes.".format(file_path), + "Starting idempotency check by comparing full YAML content " + "without header comments.".format(file_path), "DEBUG", ) @@ -1960,26 +2053,17 @@ def write_dict_to_yaml( "DEBUG", ) - existing_hash = self._compute_content_hash(existing_content) - new_hash = self._compute_content_hash(yaml_content) - - self.log( - "Content hash comparison for '{0}': existing_hash={1}, new_hash={2}".format( - file_path, existing_hash, new_hash - ), - "DEBUG", - ) - - if existing_hash == new_hash: + if self.strip_comment_lines(existing_content) == self.strip_comment_lines(yaml_content): self.log( - "Overwrite mode: File '{0}' already has identical content (hash match). " - "Skipping write.".format(file_path), + "Overwrite mode: File '{0}' already has identical YAML content " + "after excluding header comments. Skipping write.".format(file_path), "INFO", ) return False self.log( - "Overwrite mode: Content hashes differ for '{0}'. Proceeding with write.".format(file_path), + "Overwrite mode: YAML content differs for '{0}' after excluding " + "header comments. Proceeding with write.".format(file_path), "DEBUG", ) diff --git a/plugins/modules/accesspoint_location_playbook_config_generator.py b/plugins/modules/accesspoint_location_playbook_config_generator.py index 4114fae225..730b72f54b 100644 --- a/plugins/modules/accesspoint_location_playbook_config_generator.py +++ b/plugins/modules/accesspoint_location_playbook_config_generator.py @@ -980,55 +980,54 @@ def get_have(self, config): "INFO" ) - if len(site_list) == 1 and site_list[0].lower() == "all": + if self._is_wildcard_list(site_list, "Site list"): self.log( - "Site list contains 'all' keyword. Skipping site validation, all floors " - "with APs will be included in YAML generation.", - "INFO" + "Wildcard 'all' detected in site_list. Skipping validation of floor site existence. " + "All floor sites from Catalyst Center will be included in YAML generation.", "INFO" ) return self - else: + + self.log( + f"Validating {len(site_list)} floor site(s) exist in filtered_floor data " + f"populated by collect_all_accesspoint_location_list(). Each site must exist " + f"or playbook will fail.", + "DEBUG" + ) + missing_floors = [] + for floor_index, floor_name in enumerate(site_list, start=1): self.log( - f"Validating {len(site_list)} floor site(s) exist in filtered_floor data " - f"populated by collect_all_accesspoint_location_list(). Each site must exist " - f"or playbook will fail.", + f"Validating floor site {floor_index}/{len(site_list)}: '{floor_name}'. " + f"Checking existence in filtered_floor list.", "DEBUG" ) - missing_floors = [] - for floor_index, floor_name in enumerate(site_list, start=1): + floor_exist = self.find_dict_by_key_value( + self.have["filtered_floor"], "floor_site_hierarchy", floor_name + ) + + if not floor_exist: + missing_floors.append(floor_name) self.log( - f"Validating floor site {floor_index}/{len(site_list)}: '{floor_name}'. " - f"Checking existence in filtered_floor list.", - "DEBUG" - ) - floor_exist = self.find_dict_by_key_value( - self.have["filtered_floor"], "floor_site_hierarchy", floor_name + f"Floor site hierarchy '{floor_name}' does not exist or has no " + f"access points configured. Adding to missing_floors list.", + "WARNING" ) - - if not floor_exist: - missing_floors.append(floor_name) - self.log( - f"Floor site hierarchy '{floor_name}' does not exist or has no " - f"access points configured. Adding to missing_floors list.", - "WARNING" - ) - else: - self.log( - f"Floor site {floor_index}/{len(site_list)}: '{floor_name}' " - f"validated successfully. Floor exists with AP configurations.", - "DEBUG" - ) - - if missing_floors: - self.msg = ( - f"The following floor site hierarchies do not exist or have no access " - f"points configured: {missing_floors}. Total missing: " - f"{len(missing_floors)}/{len(site_list)} requested. Please verify site " - f"hierarchy paths are correct (case-sensitive, full path from Global) " - f"and floors have APs positioned on floor maps before retrying." + else: + self.log( + f"Floor site {floor_index}/{len(site_list)}: '{floor_name}' " + f"validated successfully. Floor exists with AP configurations.", + "DEBUG" ) - self.log(self.msg, "ERROR") + if missing_floors: + self.msg = ( + f"The following floor site hierarchies do not exist or have no access " + f"points configured: {missing_floors}. Total missing: " + f"{len(missing_floors)}/{len(site_list)} requested. Please verify site " + f"hierarchy paths are correct (case-sensitive, full path from Global) " + f"and floors have APs positioned on floor maps before retrying." + ) + self.log(self.msg, "ERROR") + else: self.log( f"All {len(site_list)} floor site(s) validated successfully. All requested " f"floors exist with access point configurations.", @@ -1044,58 +1043,58 @@ def get_have(self, config): "INFO" ) - if len(planned_ap_list) == 1 and planned_ap_list[0].lower() == "all": + if self._is_wildcard_list(planned_ap_list, "Planned AP list"): self.log( - "Planned AP list contains 'all' keyword. Skipping planned AP validation, " - "all planned APs will be included in YAML generation.", - "INFO" + "Wildcard 'all' detected in planned_accesspoint_list. Skipping validation of " + "planned AP existence. All planned APs from Catalyst Center will be included in " + "YAML generation.", "INFO" ) return self - else: + + self.log( + f"Validating {len(planned_ap_list)} planned AP(s) exist in " + f"all_detailed_config data. Each planned AP must exist or playbook will fail.", + "DEBUG" + ) + missing_planned_aps = [] + for ap_index, planned_ap in enumerate(planned_ap_list, start=1): self.log( - f"Validating {len(planned_ap_list)} planned AP(s) exist in " - f"all_detailed_config data. Each planned AP must exist or playbook will fail.", + f"Validating planned AP {ap_index}/{len(planned_ap_list)}: " + f"'{planned_ap}'. Checking existence in all_detailed_config.", "DEBUG" ) - missing_planned_aps = [] - for ap_index, planned_ap in enumerate(planned_ap_list, start=1): + ap_exist = self.find_dict_by_key_value( + self.have["all_detailed_config"], "accesspoint_name", planned_ap + ) + + if not ap_exist or ap_exist.get("accesspoint_type") == "real": + missing_planned_aps.append(planned_ap) self.log( - f"Validating planned AP {ap_index}/{len(planned_ap_list)}: " - f"'{planned_ap}'. Checking existence in all_detailed_config.", - "DEBUG" - ) - ap_exist = self.find_dict_by_key_value( - self.have["all_detailed_config"], "accesspoint_name", planned_ap + f"Planned access point '{planned_ap}' does not exist or is marked " + f"as 'real' type. Adding to missing_planned_aps list.", + "WARNING" ) - - if not ap_exist or ap_exist.get("accesspoint_type") == "real": - missing_planned_aps.append(planned_ap) - self.log( - f"Planned access point '{planned_ap}' does not exist or is marked " - f"as 'real' type. Adding to missing_planned_aps list.", - "WARNING" - ) - else: - self.log( - f"Planned AP {ap_index}/{len(planned_ap_list)}: '{planned_ap}' " - f"validated successfully. AP exists with type 'planned'.", - "DEBUG" - ) - - if missing_planned_aps: - self.msg = ( - f"The following planned access points do not exist: {missing_planned_aps}. " - f"Total missing: {len(missing_planned_aps)}/{len(planned_ap_list)} " - f"requested. Please verify planned AP names are correct (case-sensitive) " - f"and APs are configured as planned (not real) on floor maps before retrying." + else: + self.log( + f"Planned AP {ap_index}/{len(planned_ap_list)}: '{planned_ap}' " + f"validated successfully. AP exists with type 'planned'.", + "DEBUG" ) - self.log(self.msg, "ERROR") - self.log( - f"All {len(planned_ap_list)} planned AP(s) validated successfully. All " - f"requested planned APs exist in Catalyst Center.", - "INFO" + if missing_planned_aps: + self.msg = ( + f"The following planned access points do not exist: {missing_planned_aps}. " + f"Total missing: {len(missing_planned_aps)}/{len(planned_ap_list)} " + f"requested. Please verify planned AP names are correct (case-sensitive) " + f"and APs are configured as planned (not real) on floor maps before retrying." ) + self.log(self.msg, "ERROR") + + self.log( + f"All {len(planned_ap_list)} planned AP(s) validated successfully. All " + f"requested planned APs exist in Catalyst Center.", + "INFO" + ) # Process real_accesspoint_list filter if real_ap_list and isinstance(real_ap_list, list): @@ -1106,58 +1105,58 @@ def get_have(self, config): "INFO" ) - if len(real_ap_list) == 1 and real_ap_list[0].lower() == "all": + if self._is_wildcard_list(real_ap_list, "Real AP list"): self.log( - "Real AP list contains 'all' keyword. Skipping real AP validation, all " - "real/deployed APs will be included in YAML generation.", - "INFO" + "Wildcard 'all' detected in real_accesspoint_list. Skipping validation of " + "real AP existence. All real/deployed APs from Catalyst Center will be included in " + "YAML generation.", "INFO" ) return self - else: + + self.log( + f"Validating {len(real_ap_list)} real AP(s) exist in all_detailed_config " + f"data. Each real AP must exist or playbook will fail.", + "DEBUG" + ) + missing_real_aps = [] + for ap_index, real_ap in enumerate(real_ap_list, start=1): self.log( - f"Validating {len(real_ap_list)} real AP(s) exist in all_detailed_config " - f"data. Each real AP must exist or playbook will fail.", + f"Validating real AP {ap_index}/{len(real_ap_list)}: '{real_ap}'. " + f"Checking existence in all_detailed_config.", "DEBUG" ) - missing_real_aps = [] - for ap_index, real_ap in enumerate(real_ap_list, start=1): + ap_exist = self.find_dict_by_key_value( + self.have["all_detailed_config"], "accesspoint_name", real_ap + ) + + if not ap_exist or ap_exist.get("accesspoint_type") != "real": + missing_real_aps.append(real_ap) self.log( - f"Validating real AP {ap_index}/{len(real_ap_list)}: '{real_ap}'. " - f"Checking existence in all_detailed_config.", - "DEBUG" - ) - ap_exist = self.find_dict_by_key_value( - self.have["all_detailed_config"], "accesspoint_name", real_ap + f"Real access point '{real_ap}' does not exist or is not marked " + f"as 'real' type. Adding to missing_real_aps list.", + "WARNING" ) - - if not ap_exist or ap_exist.get("accesspoint_type") != "real": - missing_real_aps.append(real_ap) - self.log( - f"Real access point '{real_ap}' does not exist or is not marked " - f"as 'real' type. Adding to missing_real_aps list.", - "WARNING" - ) - else: - self.log( - f"Real AP {ap_index}/{len(real_ap_list)}: '{real_ap}' validated " - f"successfully. AP exists with type 'real'.", - "DEBUG" - ) - - if missing_real_aps: - self.msg = ( - f"The following real access points do not exist: {missing_real_aps}. " - f"Total missing: {len(missing_real_aps)}/{len(real_ap_list)} requested. " - f"Please verify real AP names are correct (case-sensitive) and APs are " - f"deployed and visible in Catalyst Center before retrying." + else: + self.log( + f"Real AP {ap_index}/{len(real_ap_list)}: '{real_ap}' validated " + f"successfully. AP exists with type 'real'.", + "DEBUG" ) - self.log(self.msg, "ERROR") - self.log( - f"All {len(real_ap_list)} real AP(s) validated successfully. All requested " - f"real/deployed APs exist in Catalyst Center.", - "INFO" + if missing_real_aps: + self.msg = ( + f"The following real access points do not exist: {missing_real_aps}. " + f"Total missing: {len(missing_real_aps)}/{len(real_ap_list)} requested. " + f"Please verify real AP names are correct (case-sensitive) and APs are " + f"deployed and visible in Catalyst Center before retrying." ) + self.log(self.msg, "ERROR") + + self.log( + f"All {len(real_ap_list)} real AP(s) validated successfully. All requested " + f"real/deployed APs exist in Catalyst Center.", + "INFO" + ) # Process accesspoint_model_list filter if model_list and isinstance(model_list, list): @@ -1168,58 +1167,58 @@ def get_have(self, config): "INFO" ) - if len(model_list) == 1 and model_list[0].lower() == "all": + if self._is_wildcard_list(model_list, "AP model list"): self.log( - "AP model list contains 'all' keyword. Skipping model validation, all AP " - "models will be included in YAML generation.", - "INFO" + "Wildcard 'all' detected in accesspoint_model_list. Skipping validation of AP " + "model existence. All AP models from Catalyst Center will be included in YAML " + "generation.", "INFO" ) return self - else: + + self.log( + f"Validating {len(model_list)} AP model(s) exist in all_detailed_config " + f"data. Each model must have at least one AP or playbook will fail.", + "DEBUG" + ) + missing_models = [] + for model_index, model in enumerate(model_list, start=1): self.log( - f"Validating {len(model_list)} AP model(s) exist in all_detailed_config " - f"data. Each model must have at least one AP or playbook will fail.", + f"Validating AP model {model_index}/{len(model_list)}: '{model}'. " + f"Searching for APs with this model in all_detailed_config.", "DEBUG" ) - missing_models = [] - for model_index, model in enumerate(model_list, start=1): + aps_exist = self.find_multiple_dict_by_key_value( + self.have["all_detailed_config"], "accesspoint_model", model + ) + + if not aps_exist: + missing_models.append(model) self.log( - f"Validating AP model {model_index}/{len(model_list)}: '{model}'. " - f"Searching for APs with this model in all_detailed_config.", - "DEBUG" - ) - aps_exist = self.find_multiple_dict_by_key_value( - self.have["all_detailed_config"], "accesspoint_model", model + f"Access point model '{model}' not found in Catalyst Center. No " + f"APs with this model exist. Adding to missing_models list.", + "WARNING" ) - - if not aps_exist: - missing_models.append(model) - self.log( - f"Access point model '{model}' not found in Catalyst Center. No " - f"APs with this model exist. Adding to missing_models list.", - "WARNING" - ) - else: - self.log( - f"AP model {model_index}/{len(model_list)}: '{model}' validated " - f"successfully. Found {len(aps_exist)} AP(s) with this model.", - "DEBUG" - ) - - if missing_models: - self.msg = ( - f"The following access point models do not exist: {missing_models}. " - f"Total missing: {len(missing_models)}/{len(model_list)} requested. " - f"Please verify AP model names are correct (case-sensitive, exact match) " - f"and APs with these models are deployed in Catalyst Center before retrying." + else: + self.log( + f"AP model {model_index}/{len(model_list)}: '{model}' validated " + f"successfully. Found {len(aps_exist)} AP(s) with this model.", + "DEBUG" ) - self.log(self.msg, "ERROR") - self.log( - f"All {len(model_list)} AP model(s) validated successfully. All requested " - f"models have deployed APs in Catalyst Center.", - "INFO" + if missing_models: + self.msg = ( + f"The following access point models do not exist: {missing_models}. " + f"Total missing: {len(missing_models)}/{len(model_list)} requested. " + f"Please verify AP model names are correct (case-sensitive, exact match) " + f"and APs with these models are deployed in Catalyst Center before retrying." ) + self.log(self.msg, "ERROR") + + self.log( + f"All {len(model_list)} AP model(s) validated successfully. All requested " + f"models have deployed APs in Catalyst Center.", + "INFO" + ) # Process mac_address_list filter if mac_list and isinstance(mac_list, list): @@ -1230,59 +1229,59 @@ def get_have(self, config): "INFO" ) - if len(mac_list) == 1 and mac_list[0].lower() == "all": + if self._is_wildcard_list(mac_list, "MAC address list"): self.log( - "MAC address list contains 'all' keyword. Skipping MAC validation, all " - "APs with MAC addresses will be included in YAML generation.", - "INFO" + "Wildcard 'all' detected in mac_address_list. Skipping validation of MAC " + "address existence. All MAC addresses from Catalyst Center will be included in " + "YAML generation.", "INFO" ) return self - else: + + self.log( + f"Validating {len(mac_list)} MAC address(es) exist in all_detailed_config " + f"data. Each MAC must match an AP or playbook will fail.", + "DEBUG" + ) + missing_macs = [] + for mac_index, mac in enumerate(mac_list, start=1): + normalized_mac = mac.lower() self.log( - f"Validating {len(mac_list)} MAC address(es) exist in all_detailed_config " - f"data. Each MAC must match an AP or playbook will fail.", + f"Validating MAC address {mac_index}/{len(mac_list)}: '{normalized_mac}' " + f"(normalized). Searching for AP with this MAC in all_detailed_config.", "DEBUG" ) - missing_macs = [] - for mac_index, mac in enumerate(mac_list, start=1): - normalized_mac = mac.lower() + aps_exist = self.find_multiple_dict_by_key_value( + self.have["all_detailed_config"], "mac_address", normalized_mac + ) + + if not aps_exist: + missing_macs.append(mac) self.log( - f"Validating MAC address {mac_index}/{len(mac_list)}: '{normalized_mac}' " - f"(normalized). Searching for AP with this MAC in all_detailed_config.", - "DEBUG" - ) - aps_exist = self.find_multiple_dict_by_key_value( - self.have["all_detailed_config"], "mac_address", normalized_mac + f"MAC address '{normalized_mac}' not found in Catalyst Center. No " + f"AP with this MAC exists. Adding to missing_macs list.", + "WARNING" ) - - if not aps_exist: - missing_macs.append(mac) - self.log( - f"MAC address '{normalized_mac}' not found in Catalyst Center. No " - f"AP with this MAC exists. Adding to missing_macs list.", - "WARNING" - ) - else: - self.log( - f"MAC address {mac_index}/{len(mac_list)}: '{normalized_mac}' " - f"validated successfully. Found {len(aps_exist)} AP(s) with this MAC.", - "DEBUG" - ) - - if missing_macs: - self.msg = ( - f"The following MAC addresses do not exist: {missing_macs}. Total " - f"missing: {len(missing_macs)}/{len(mac_list)} requested. Please verify " - f"MAC addresses are correct (format: aa:bb:cc:dd:ee:ff) and APs with " - f"these MACs are deployed in Catalyst Center before retrying." + else: + self.log( + f"MAC address {mac_index}/{len(mac_list)}: '{normalized_mac}' " + f"validated successfully. Found {len(aps_exist)} AP(s) with this MAC.", + "DEBUG" ) - self.log(self.msg, "ERROR") - self.log( - f"All {len(mac_list)} MAC address(es) validated successfully. All requested " - f"MAC addresses match deployed APs in Catalyst Center.", - "INFO" + if missing_macs: + self.msg = ( + f"The following MAC addresses do not exist: {missing_macs}. Total " + f"missing: {len(missing_macs)}/{len(mac_list)} requested. Please verify " + f"MAC addresses are correct (format: aa:bb:cc:dd:ee:ff) and APs with " + f"these MACs are deployed in Catalyst Center before retrying." ) + self.log(self.msg, "ERROR") + + self.log( + f"All {len(mac_list)} MAC address(es) validated successfully. All requested " + f"MAC addresses match deployed APs in Catalyst Center.", + "INFO" + ) self.log( f"Current State (have): {self.pprint(self.have)}. Data collection and validation " @@ -1296,6 +1295,33 @@ def get_have(self, config): self.msg = "Successfully retrieved access point location details from Cisco Catalyst Center." return self + def _is_wildcard_list(self, item_list, list_name): + """Check if the given list contains the 'all' wildcard keyword. + + Args: + item_list (list): The list to check for 'all' keyword. + list_name (str): Human-readable name for logging (e.g., "Site list"). + + Returns: + bool: True if 'all' is found (case-insensitive), False otherwise. + """ + if any(item.lower() == "all" for item in item_list): + if len(item_list) > 1: + self.log( + "{0} contains 'all' keyword along with {1} other entries. " + "The 'all' keyword takes precedence — all other entries " + "will be ignored.".format(list_name, len(item_list) - 1), + "WARNING" + ) + self.log( + "{0} contains 'all' keyword. Skipping validation — all items " + "will be included.".format(list_name), + "INFO" + ) + return True + + return False + def find_multiple_dict_by_key_value(self, data_list, key, value): """ Searches for and returns all dictionaries matching a specific key-value pair. @@ -3014,7 +3040,7 @@ def process_global_filters(self, global_filters): "INFO" ) - if len(site_list) == 1 and site_list[0].lower() == "all": + if self._is_wildcard_list(site_list, "Site list"): self.log( "Site list contains 'all' keyword. Returning complete planned AP configuration " "collection without individual site validation. This bypasses per-site matching.", @@ -3097,7 +3123,7 @@ def process_global_filters(self, global_filters): "INFO" ) - if len(planned_accesspoint_list) == 1 and planned_accesspoint_list[0].lower() == "all": + if self._is_wildcard_list(planned_accesspoint_list, "Planned AP list"): self.log( "Planned AP list contains 'all' keyword. Returning complete planned AP configuration " "collection without individual AP validation.", @@ -3219,7 +3245,7 @@ def process_global_filters(self, global_filters): "INFO" ) - if len(real_accesspoint_list) == 1 and real_accesspoint_list[0].lower() == "all": + if self._is_wildcard_list(real_accesspoint_list, "Real AP list"): self.log( "Real AP list contains 'all' keyword. Returning complete real AP configuration " "collection without individual AP validation.", @@ -3342,7 +3368,7 @@ def process_global_filters(self, global_filters): "INFO" ) - if len(accesspoint_model_list) == 1 and accesspoint_model_list[0].lower() == "all": + if self._is_wildcard_list(accesspoint_model_list, "AP model list"): self.log( "AP model list contains 'all' keyword. Returning complete AP configuration collection " "(planned + real) without individual model validation.", diff --git a/plugins/modules/accesspoint_location_workflow_manager.py b/plugins/modules/accesspoint_location_workflow_manager.py index 5cbccb8b44..52840773b7 100644 --- a/plugins/modules/accesspoint_location_workflow_manager.py +++ b/plugins/modules/accesspoint_location_workflow_manager.py @@ -3420,9 +3420,7 @@ def get_diff_deleted(self, config): if self.location_deleted: deleted_count = len(self.location_deleted) success_msg = ( - "Access point positions deleted successfully: {0}".format( - self.location_deleted - ) + "Access point positions deleted successfully." ) self.log(success_msg, "INFO") self.msg = success_msg @@ -3493,8 +3491,25 @@ def get_diff_deleted(self, config): self.msg = "Access point position deletion workflow completed" self.log(self.msg, "INFO") + if self.location_deleted: + self.result_response['accesspoint_deletion'].append( + "The access point positions for {0}".format(", ".join(self.location_deleted)) + + " have been successfully deleted from the site " + site_hierarchy) + + if self.location_already_deleted: + self.result_response['already_processed'].append( + "No changes required - access point positions already deleted and " + "verified successfully: {0}".format(", ".join(self.location_already_deleted)) + + " from the site " + site_hierarchy) + + if self.location_not_deleted: + self.result_response['unprocessed'].append( + f"Unable to delete the following access point positions: {self.location_not_deleted}" + + " from the site " + site_hierarchy) + + # Set verification results and validate return status self.set_operation_result( - self.status, self.changed, self.msg, "INFO" + self.status, self.changed, self.msg, "INFO", self.result_response ).check_return_status() return self diff --git a/plugins/modules/application_policy_workflow_manager.py b/plugins/modules/application_policy_workflow_manager.py index 48cda24144..15f8530c5f 100644 --- a/plugins/modules/application_policy_workflow_manager.py +++ b/plugins/modules/application_policy_workflow_manager.py @@ -272,20 +272,51 @@ the application policy. type: str clause: - description: Defines specific rules or conditions - under which an application set is added - to the application policy. + description: + - Defines specific rules or conditions + under which application sets are added + to or removed from the application + policy. + - When C(state=merged), the clause defines + application sets to add to the policy. + - When C(state=deleted) and clause is + provided, it specifies which application + sets to remove from the policy. + - If C(relevance_details) contains only + C(relevance) without + C(application_set_name), all application + sets under that relevance level are + removed. + - If C(relevance_details) contains both + C(relevance) and C(application_set_name), + only those specific application sets + are removed. + - If clause is omitted during a delete + operation, the entire application + policy is deleted. + - Only C(clause_type) + C(BUSINESS_RELEVANCE) supports partial + deletion of application sets. The + C(APPLICATION_POLICY_KNOBS) clause type + is not affected by delete operations. type: list elements: dict suboptions: clause_type: description: | - - Specifies the type of clause for the application policy. + - Specifies the type of clause for + the application policy. - Permissible values: - - "BUSINESS_RELEVANCE": Defines the importance of the application to business operations, affecting its priority and - handling in the network policy. - - "APPLICATION_POLICY_KNOBS": Configurable settings that manage the application's network behavior, - such as traffic prioritization and resource allocation. + - "BUSINESS_RELEVANCE": Defines + the importance of the application + to business operations, affecting + its priority and handling in the + network policy. + - "APPLICATION_POLICY_KNOBS": + Configurable settings that manage + the application's network behavior, + such as traffic prioritization + and resource allocation. type: str relevance_details: description: Details about how relevant @@ -295,16 +326,52 @@ suboptions: relevance: description: | - - Specifies whether the application set is relevant to the application policy. + - Specifies the relevance level + of the application set within + the application policy. + - When C(state=merged), determines + which relevance group the + application sets are added to. + - When C(state=deleted), specifies + which relevance level to target + for application set removal. - Permissible values: - - "BUSINESS_RELEVANT": The application is critical for business functions. - - "BUSINESS_IRRELEVANT": The application is not essential for business operations. - - "DEFAULT": A default setting when no specific relevance is assigned. + - "BUSINESS_RELEVANT": The + application is critical for + business functions. + - "BUSINESS_IRRELEVANT": The + application is not essential + for business operations. + - "DEFAULT": A default setting + when no specific relevance + is assigned. type: str application_set_name: - description: Include all the application - sets for which the application - policy has to be created + description: + - List of application set names to + associate with or remove from + the specified relevance level. + - Matched against the application + set names currently associated + with the policy in Cisco + Catalyst Center. + - When C(state=merged), includes + the application sets to add to + the policy under this relevance + level. + - When C(state=deleted), specifies + which application sets to + remove from the given relevance + level. + - If omitted with C(state=deleted), + all application sets under the + specified relevance level are + removed. + - Scoped per C(relevance_details) + entry, not globally across the + entire clause. + - "For example: ['file-sharing', + 'collaboration-apps']." type: list elements: str requirements: @@ -389,6 +456,7 @@ bulk_data: "10" scavenger: "2" real_time_interactive: "34" + # Playbook - Enterprise QoS Profile (Common Across All Interface Speeds) - name: Deploy Enterprise QoS Profile in Cisco Catalyst Center @@ -436,6 +504,7 @@ broadcast_video: "2" network_control: "3" bulk_data: "5" + # Playbook - QoS Profile Based on Interface Speeds - name: Deploy Interface-Specific QoS Profile in Cisco Catalyst Center @@ -554,6 +623,7 @@ broadcast_video: "2" network_control: "3" bulk_data: "5" + # Playbook - for some interface speeds having common bandwidth percentage - name: Configure an Application Queueing Profile for Traffic Prioritization @@ -644,6 +714,7 @@ broadcast_video: "2" network_control: "3" bulk_data: "5" + # Playbook - application queuing profile - type dscp - name: Configure Application Queuing Profile (DSCP) in Cisco Catalyst Center @@ -687,6 +758,7 @@ bulk_data: "10" scavenger: "2" real_time_interactive: "34" + # Playbook - update application queuing profile - name: Application Queuing Profile update in Cisco Catalyst Center @@ -749,6 +821,7 @@ bulk_data: "10" scavenger: "2" real_time_interactive: "34" + # Playbook - delete application queuing profile - name: Delete application queuing profile from Cisco Catalyst Center @@ -777,6 +850,7 @@ config: - queuing_profile: - profile_name: "Enterprise_Traffic_Profile" # Profile to be deleted + # Playbook - create application policy – wired - name: Create Wired Application Policy in Cisco Catalyst Center @@ -817,6 +891,7 @@ application_set_name: ["email", "tunneling"] - relevance: "DEFAULT" application_set_name: ["backup-and-storage", "general-media", "file-sharing"] + # Playbook - create application policy – wireless - name: Create Wireless Application Policy in Cisco Catalyst Center @@ -858,6 +933,7 @@ application_set_name: ["email", "backup-and-storage"] - relevance: "DEFAULT" application_set_name: ["collaboration-apps", "tunneling", "general-media"] + # Playbook - delete application policy - name: Delete Application Policy from Cisco Catalyst Center @@ -885,6 +961,77 @@ config: - application_policy: - name: "ObsoleteTrafficPolicy" + +# Playbook - delete all application sets under a specific +# relevance level from an application policy +- name: Remove all application sets under a relevance + level from an application policy + hosts: localhost + connection: local + gather_facts: false + vars_files: + - "credentials.yml" + tasks: + - name: Delete all BUSINESS_RELEVANT application + sets from policy + cisco.dnac.application_policy_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - application_policy: + - name: "wired_traffic_policy" + clause: + - clause_type: "BUSINESS_RELEVANCE" + relevance_details: + - relevance: "BUSINESS_RELEVANT" + +# Playbook - delete specific application set(s) from +# an application policy +- name: Remove specific application set(s) from an + application policy in Cisco Catalyst Center + hosts: localhost + connection: local + gather_facts: false + vars_files: + - "credentials.yml" + tasks: + - name: Delete specific application set(s) from + a policy + cisco.dnac.application_policy_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - application_policy: + - name: "wired_traffic_policy" + clause: + - clause_type: "BUSINESS_RELEVANCE" + relevance_details: + - relevance: "BUSINESS_RELEVANT" + application_set_name: ["collaboration-apps"] + - relevance: "BUSINESS_IRRELEVANT" + application_set_name: ["email", "tunneling"] """ RETURN = r""" @@ -1159,6 +1306,7 @@ def __init__(self, module): self.no_update_application_policy, ) = ([], [], []) self.deleted_application_policy, self.no_deleted_application_policy = [], [] + self.deleted_application_set_from_policy = [] ( self.created_queuing_profile, self.updated_queuing_profile, @@ -6210,90 +6358,284 @@ def get_diff_deleted(self, config): def delete_application_policy(self): """ - Delete an existing application policy or just the application set(s) if specified in the playbook. + Delete an existing application policy or specific + application sets based on clause configuration. Args: - self (object): An instance of the class for interacting with Cisco Catalyst Center. + self (object): An instance of the class for + interacting with Cisco Catalyst Center. + Reads from self.config["application_policy"] + (list[dict]), where each dict may contain: + - name (str): Policy name. Required. + - clause (list[dict], optional): Each dict + has clause_type (str) and + relevance_details (list[dict]). + Each relevance_details dict has: + - relevance (str): The relevance level. + - application_set_name (list[str], + optional): Specific sets to remove. Returns: - self: The updated instance with 'status', 'msg', and 'result' attributes. + self: The updated instance with 'status', + 'msg', and 'result' attributes. Description: - This method deletes an application policy or only the application set(s) from Cisco Catalyst Center. - If 'application_set_name' is provided in the playbook, only the application set will be deleted. - If not, the entire policy will be deleted. If the policy does not exist, a message is logged. - If an error occurs, it is caught and handled appropriately. + Deletes application policies or application + sets from Cisco Catalyst Center. + - If no 'clause' is provided in the config + entry, the entire policy is deleted. + - If 'clause' with 'relevance_details' is + provided: + - With 'application_set_name': only the + specified sets are removed from that + relevance level. + - Without 'application_set_name': all sets + under that relevance level are removed. + - If the policy does not exist, the policy + name is added to + self.no_deleted_application_policy and + processing continues to the next entry. """ - application_policy_details = self.config.get("application_policy", []) - exists_false, exists_true, success_msg, failed_msg = [], [], [], [] - application_sets_deleted = ( - [] - ) # To track which application sets were deleted from which policies - application_set_not_present = ( - [] - ) # To track missing application sets for policies - - # Loop through each policy in the config - for policy in application_policy_details: - policy_name = policy.get("name") - application_set_name_in_config = policy.get("application_set_name") + self.log( + "Starting deletion of application policies. " + "Total policies to process: {0}. " + "Policy names: {1}.".format( + len(application_policy_details), + [p.get("name") for p in application_policy_details], + ), + "INFO", + ) + success_msg, failed_msg = [], [] + application_sets_deleted = [] + application_set_not_present = [] + total_policies = len(application_policy_details) + for policy_idx, policy in enumerate(application_policy_details, start=1): + policy_name = policy.get("name") + self.log( + "Processing policy {0}/{1} with name='{2}'.".format( + policy_idx, total_policies, policy_name + ), + "DEBUG", + ) + clause_config = policy.get("clause") + self.log( + "Verifying policy {0}/{1}: '{2}'.".format( + policy_idx, total_policies, policy_name + ), + "DEBUG", + ) # Fetch current application policy details application_policy_exists, current_application_policy = ( self.get_application_policy_details(policy_name) ) if not application_policy_exists: - exists_false.append(policy_name) + self.log( + "Policy '{0}' does not exist. Skipping " + "deletion. Continuing.".format(policy_name), + "INFO", + ) failed_msg.append(policy_name) + self.no_deleted_application_policy.append(policy_name) continue - ids_list = [] # Store the IDs of application sets or policies to be deleted - application_set_names = [] # List to track valid application sets - application_set_name_not_available = ( - [] - ) # List of application sets not found + ids_list = [] + application_set_names_deleted = [] - if application_set_name_in_config: - # If application set name is provided, check if they exist or are already deleted - for current_policy in current_application_policy: - if "id" in current_policy: - for app_name in application_set_name_in_config: - if app_name in current_policy.get("name", ""): - application_set_names.append(app_name) - ids_list.append( - current_policy.get("id") - ) # Add the application set's ID - break - - # Identify any application sets that are missing in the policy - application_set_name_not_available = [ - app_name - for app_name in application_set_name_in_config - if app_name not in application_set_names - ] + if clause_config: + # Build a set of (relevance, app_set_name) pairs to delete + # and a set of relevance levels where ALL sets should be deleted + targeted_sets = set() + delete_all_for_relevance = set() - # Proceed with valid application sets even if some are not available - if application_set_name_not_available: - application_set_not_present.append( - (policy_name, application_set_name_not_available) + total_clauses = len(clause_config) + for clause_idx, clause_item in enumerate(clause_config): + relevance_details = clause_item.get("relevance_details", []) + self.log( + "Processing clause {0}/{1} with type='{2}'.".format( + clause_idx + 1, + total_clauses, + clause_item.get("clause_type", "unknown") + ), + "DEBUG", ) + + total_details = len(relevance_details) + for detail_idx, detail in enumerate(relevance_details): + relevance = detail.get("relevance") + self.log( + "Processing relevance_detail {0}/{1} " + "with relevance='{2}'.".format( + detail_idx + 1, total_details, relevance + ), + "DEBUG", + ) + app_set_names = detail.get("application_set_name") + if app_set_names: + total_names = len(app_set_names) + for name_idx, name in enumerate(app_set_names): + self.log( + "Adding targeted set {0}/{1}: " + "relevance='{2}', name='{3}'.".format( + name_idx + 1, total_names, relevance, name + ), + "DEBUG", + ) + targeted_sets.add((relevance, name)) + else: + # No application_set_name means delete ALL sets under this relevance + self.log( + "No specific application sets provided for relevance='{0}'. " + "All sets under this relevance will be deleted.".format( + relevance + ), + "DEBUG", + ) + delete_all_for_relevance.add(relevance) + + self.log( + "Policy '{0}': targeted_sets={1}, delete_all_for_relevance={2}".format( + policy_name, targeted_sets, delete_all_for_relevance + ), + "DEBUG", + ) + + # Match current policy entries to the targeted sets/relevance levels + prefix = policy_name + "_" + + total_current = len(current_application_policy) + for cp_idx, current_policy in enumerate( + current_application_policy + ): + current_name = current_policy.get("name", "") + self.log( + "Evaluating policy entry {0}/{1} " + "with name='{2}'.".format( + cp_idx + 1, total_current, current_name + ), + "DEBUG", + ) + current_name = current_policy.get("name", "") + policy_id = current_policy.get("id") + if not policy_id: + self.log( + "Skipping policy entry '{0}' — no 'id' " + "field found. Continuing.".format( + current_name + ), + "DEBUG", + ) + continue + + # Determine the relevance level from exclusiveContract clause + entry_relevance = None + exclusive_contract = current_policy.get("exclusiveContract", {}) + contract_clauses = exclusive_contract.get("clause", []) + for cc_idx, cc in enumerate(contract_clauses): + if cc.get("type") == "BUSINESS_RELEVANCE": + entry_relevance = cc.get("relevanceLevel") + self.log( + "Found BUSINESS_RELEVANCE at clause " + "index {0} with relevanceLevel='{1}'. " + "Breaking out of loop.".format( + cc_idx, entry_relevance + ), + "DEBUG", + ) + break + + if not entry_relevance: + self.log( + "Skipping policy entry '{0}' — no " + "BUSINESS_RELEVANCE clause found. " + "Continuing.".format(current_name), + "DEBUG", + ) + continue + + # Catalyst Center names policy entries as + # "{policy_name}_{app_set_name}". Strip the prefix + # to extract the app set name for matching against + # user-specified targets. + if current_name.startswith(prefix): + entry_app_set_name = current_name[len(prefix):] + else: + entry_app_set_name = current_name + + # Check if we should delete all sets under this relevance + if entry_relevance in delete_all_for_relevance: + ids_list.append(policy_id) + application_set_names_deleted.append( + entry_app_set_name + ) + self.log( + "Marked '{0}' for deletion — matches " + "delete-all for relevance='{1}'. " + "Continuing.".format( + entry_app_set_name, entry_relevance + ), + "DEBUG", + ) + continue + + # Check if specific sets are targeted under this relevance + for ts_idx, (rel, app_name) in enumerate(targeted_sets): + if rel == entry_relevance and app_name == entry_app_set_name: + ids_list.append(policy_id) + application_set_names_deleted.append(app_name) + self.log( + "Match found for targeted set " + "relevance='{0}', app_name='{1}'. " + "Breaking out of loop.".format( + rel, app_name + ), + "DEBUG", + ) + break + + # Check for targeted sets that were not found + if targeted_sets: + found_names = set(application_set_names_deleted) + not_found = [ + targeted_set[1] for targeted_set in targeted_sets if targeted_set[1] not in found_names + ] + if not_found: + self.log("Targeted sets not found in policy '{0}': {1}.".format(policy_name, not_found), "WARNING") + application_set_not_present.append((policy_name, not_found)) + + if not ids_list: + self.log( + "No matching application sets found for deletion in policy '{0}'.".format( + policy_name + ), + "WARNING", + ) + continue else: - # If application_set_name is not in the config, delete the entire policy - for current_policy in current_application_policy: + # No clause provided - delete the entire policy + total_current = len(current_application_policy) + for cp_idx, current_policy in enumerate( + current_application_policy + ): if "id" in current_policy: ids_list.append(current_policy["id"]) + self.log( + "Collecting policy entry {0}/{1} " + "with ID='{2}' for full delete.".format( + cp_idx + 1, total_current, + current_policy["id"] + ), + "DEBUG", + ) try: - # Sending the list of application set or policy IDs for deletion response = self.dnac._exec( family="application_policy", function="application_policy_intent", op_modifies=True, - params={ - "deleteList": ids_list - }, # Pass the collected IDs for deletion + params={"deleteList": ids_list}, ) self.log( @@ -6302,40 +6644,42 @@ def delete_application_policy(self): ), "DEBUG", ) - self.deleted_application_policy.append(policy_name) self.check_tasks_response_status(response, "application_policy_intent") - # Proceed only if the status is successful if self.status not in ["failed", "exited"]: - # If specific application sets were provided for deletion - if application_set_names: + if clause_config and application_set_names_deleted: self.msg = "Application set(s) '{0}' removed from policy '{1}' successfully.".format( - ", ".join(application_set_names), policy_name + ", ".join(application_set_names_deleted), policy_name ) self.set_operation_result("success", True, self.msg, "INFO") - application_sets_deleted.append( - f"Application set(s) '{', '.join(application_set_names)}' removed from policy '{policy_name}'" + delete_msg = ( + "Application set(s) '{0}' removed from " + "policy '{1}'".format( + ", ".join(application_set_names_deleted), + policy_name, + ) ) + self.deleted_application_set_from_policy.append(delete_msg) + application_sets_deleted.append(delete_msg) else: - # If no application sets were specified, the whole policy is deleted + self.deleted_application_policy.append(policy_name) self.msg = ( "Application policy '{0}' deleted successfully.".format( policy_name ) ) self.set_operation_result("success", True, self.msg, "INFO") - success_msg.append(self.msg) # Track the success message + success_msg.append(self.msg) except Exception as e: self.msg = "Error occurred while deleting policy '{0}': {1}".format( policy_name, e ) self.set_operation_result("failed", False, self.msg, "ERROR") - failed_msg.append(self.msg) # Track the failed message + failed_msg.append(self.msg) final_msg = [] - # Reporting application set deletions first if application_sets_deleted: final_msg.append( "Successfully deleted the following application set(s): {0}".format( @@ -6343,23 +6687,23 @@ def delete_application_policy(self): ) ) - # Reporting missing or already deleted application sets with policy names in the required format if application_set_not_present: - # Now collect all missing sets and group by policy missing_sets_message = [] - for policy_name, missing_sets in application_set_not_present: - if missing_sets: # Ensure only policies with missing sets are reported + for ms_idx, (pol_name, missing_sets) in enumerate( + application_set_not_present + ): + if missing_sets: missing_sets_message.append( - f"'{policy_name}': [{', '.join(missing_sets)}]" + "'{0}': [{1}]".format( + pol_name, ", ".join(missing_sets) + ) ) - if missing_sets_message: final_msg.append( "The following application set(s) are not present or already deleted in policies: " + ", ".join(missing_sets_message) ) - # Reporting policy deletions if success_msg: final_msg.append( "Successfully deleted the following policy(ies): {0}".format( @@ -6373,19 +6717,18 @@ def delete_application_policy(self): ", ".join(failed_msg) ) ) - self.no_deleted_application_policy.append(policy_name) - # Join all the messages together self.msg = final_msg - # Determine final operation result - if not success_msg and failed_msg: + if not success_msg and not application_sets_deleted and failed_msg: self.set_operation_result("success", False, self.msg, "ERROR") - elif success_msg and failed_msg: - self.set_operation_result("success", True, self.msg, "INFO") else: self.set_operation_result("success", True, self.msg, "INFO") - + self.log( + "Completed application policy deletion. " + "Final message: {0}".format(self.msg), + "INFO", + ) return self def delete_application_queuing_profile(self): @@ -6782,6 +7125,12 @@ def update_all_messages(self): ) no_update_list.append(msg) + if self.deleted_application_set_from_policy: + msg = "{0} in Cisco Catalyst Center.".format( + "; ".join(self.deleted_application_set_from_policy) + ) + result_msg_list.append(msg) + if self.deleted_application_policy: msg = "Application Policy(ies) '{0}' deleted successfully from Cisco Catalyst Center.".format( "', '".join(self.deleted_application_policy) @@ -6967,29 +7316,33 @@ def verify_diff_merged(self, config): def verify_diff_deleted(self, config): """ - Verifies the deletion status of configurations in Cisco Catalyst Center. + Verify deletion of application policies or + application sets from Cisco Catalyst Center. Args: - self (object): An instance of the class used for interacting with Cisco Catalyst Center. - config (dict): The configuration dictionary containing the details to be verified, including application - queuing profiles, applications, and application policies. + self (object): An instance of the class for + interacting with Cisco Catalyst Center. + config (dict): The playbook configuration + containing application_policy details. Returns: - self: The current instance of the class, with updated 'status' and 'msg' attributes based on the verification. + self: The updated instance with verification + results in 'msg' attribute. Description: - This method checks the deletion status of configurations in Cisco Catalyst Center by comparing the current state - (have) and desired state (want) of the configuration. It verifies that the configurations, if requested for deletion, - are no longer present in the Cisco Catalyst Center. - - The method performs the following verifications: - - Ensures that the specified application queuing profile has been deleted. - - Ensures that the specified application has been deleted. - - Ensures that the specified application policy has been deleted. - - The function logs the success or failure of the deletion verification and updates the status accordingly. If the - configuration to be deleted is found to be absent in the current state, the deletion is considered successful, and - a success message is logged. + Verifies whether the delete operation + completed successfully by comparing the + current state against the desired state. + + Two verification paths: + - Full policy delete (no clause in config): + Confirms the policy no longer exists in + Catalyst Center. + - Partial delete (clause provided in config): + Confirms the policy still exists but the + targeted application sets have been + removed. Logs whether the policy persists + after partial deletion. """ self.log("Verify starts here verify diff deleted", "INFO") @@ -7052,27 +7405,71 @@ def verify_diff_deleted(self, config): self.log("Current State (have): {0}".format(str(self.have)), "INFO") self.log("Desired State (want): {0}".format(str(self.want)), "INFO") - # Code to validate ccc config for merged state - application_policy_exist = self.have.get("application_policy_exists") - application_policy_name = self.want.get("application_policy")[0].get("name") + application_policies = self.want.get("application_policy", []) - if not application_policy_exist: - self.msg = ( - "The requested application policy {0} is not present in the Cisco Catalyst Center " - "and its deletion has been verified.".format( - application_policy_name - ) - ) - self.log(self.msg, "INFO") - - else: + for index, application_policy in enumerate(application_policies, start=1): + application_policy_name = application_policy.get("name") self.log( - "The playbook input for application policy {0} does not align with the Cisco Catalyst Center, indicating that the \ - merge task may not have executed successfully.".format( - application_policy_name + "Verifying deletion for application policy entry #{0}: {1}".format( + index, application_policy_name ), "INFO", ) + + if not application_policy_name: + self.msg = ( + "The following parameter(s): 'name' could not be found and " + "are mandatory to create or update application policy." + ) + self.set_operation_result( + "failed", False, self.msg, "ERROR" + ).check_return_status() + + clause_config = application_policy.get("clause") + + current_application_policies = self.have.get("application_policies", {}) + policy_info = current_application_policies.get( + application_policy_name, {} + ) + application_policy_exist = policy_info.get( + "application_policy_exists", False + ) + + if clause_config: + # Partial delete (app sets from policy) - policy may still exist + if application_policy_exist: + self.log( + "Application policy '{0}' still exists after partial delete. " + "Verifying that the targeted application sets were removed.".format( + application_policy_name + ), + "INFO", + ) + else: + self.log( + "Application policy '{0}' is no longer present after the delete operation.".format( + application_policy_name + ), + "INFO", + ) + else: + # Full policy delete + if not application_policy_exist: + self.msg = ( + "The requested application policy {0} is not present in the Cisco Catalyst Center " + "and its deletion has been verified.".format( + application_policy_name + ) + ) + self.log(self.msg, "INFO") + else: + self.log( + "The playbook input for application policy {0} does not align with the Cisco Catalyst Center, indicating that the " + "delete task may not have executed successfully.".format( + application_policy_name + ), + "INFO", + ) return self diff --git a/plugins/modules/assurance_device_health_score_settings_playbook_config_generator.py b/plugins/modules/assurance_device_health_score_settings_playbook_config_generator.py index 858ff873d4..b58f9f82be 100644 --- a/plugins/modules/assurance_device_health_score_settings_playbook_config_generator.py +++ b/plugins/modules/assurance_device_health_score_settings_playbook_config_generator.py @@ -109,39 +109,41 @@ - device_health_score_settings device_health_score_settings: description: - - Nested dictionary for device health score settings specific filters. - - Provides fine-grained control over device families and KPI settings - to extract from Catalyst Center. + - List of filter entries for device health score settings. + - Each entry specifies a set of device families to extract KPI + threshold settings for. + - Multiple entries are supported; device families are flattened + and deduplicated across all entries before API calls are made. - Allows targeting specific device families without extracting all configured settings. - - Modern recommended approach for filter specification in new - playbooks. - type: dict + type: list + elements: dict required: false suboptions: device_families: description: - List of specific device family names to extract KPI threshold - settings for using modern nested filter format. + settings. + - Multiple device family sets are specified by adding entries to + the parent C(device_health_score_settings) list, each with its + own C(device_families) list. - Valid device family names include C(ROUTER) for routing devices, C(SWITCH_AND_HUB) for switching infrastructure, C(WIRELESS_CONTROLLER) for wireless LAN controllers, C(UNIFIED_AP) for wireless access points, C(WIRELESS_CLIENT) for wireless client devices, and C(WIRED_CLIENT) for wired client devices. - - If not specified, all device families with configured KPI - threshold settings will be extracted for comprehensive - brownfield documentation. - - Duplicate device family values are automatically removed while - preserving the original order of unique entries. - - Each device family may have different KPI metrics and - thresholds based on device capabilities and health monitoring - requirements. - - Example filter C(["UNIFIED_AP", "ROUTER", "SWITCH_AND_HUB", - "WIRELESS_CONTROLLER"]) extracts settings for wireless and - wired infrastructure. + - If not specified across any entry, all device families with + configured KPI threshold settings will be extracted. + - Duplicate device family values across entries are automatically + removed while preserving the original order of first occurrence. + - When omitted from a single entry but present in others, only + the families specified in the remaining entries are used; an + entry without C(device_families) does not broaden the filter to + all families. - Device family names are case-sensitive and must match exact names used in Catalyst Center. + For example, C(UNIFIED_AP) not C(unified_ap). type: list elements: str choices: @@ -190,6 +192,24 @@ increased C(dnac_api_task_timeout) values for complete data extraction. - Generated YAML structure follows ordered dictionary format to maintain consistent key ordering across multiple generations. +- |- + Module result behavior (changed/ok/failed): + The module result reflects local file state only, not Catalyst Center state. + In overwrite mode, the full file content is compared (excluding volatile + fields like timestamps and playbook path). In append mode, only the last + YAML document in the file is compared against the newly generated + configuration. If a file contains multiple config entries from previous + appends, only the most recent entry is used for the idempotency check. + - changed=true (status: success): The generated YAML configuration differs + from the existing output file (or the file does not exist). The file was + written and the configuration was updated. + - changed=false (status: ok): The generated YAML configuration matches the + existing output file content. The write was skipped as the file is + already up-to-date. + - failed=true (status: failed): The module encountered a validation error, + API failure, or file write error. No file was written or modified. + Note: Re-running with identical inputs and unchanged Catalyst Center state + will produce changed=false, ensuring idempotent playbook behavior. seealso: - module: cisco.dnac.assurance_device_health_score_settings_workflow_manager @@ -266,7 +286,7 @@ component_specific_filters: components_list: ["device_health_score_settings"] device_health_score_settings: - device_families: ["UNIFIED_AP", "ROUTER", "SWITCH_AND_HUB", "WIRELESS_CONTROLLER"] + - device_families: ["UNIFIED_AP", "ROUTER", "SWITCH_AND_HUB", "WIRELESS_CONTROLLER"] - name: Generate YAML Configuration with implicit component auto-add cisco.dnac.assurance_device_health_score_settings_playbook_config_generator: @@ -285,7 +305,55 @@ config: component_specific_filters: device_health_score_settings: - device_families: ["UNIFIED_AP", "ROUTER"] + - device_families: ["UNIFIED_AP", "ROUTER"] + +- name: Generate YAML Configuration with deduplication across entries + cisco.dnac.assurance_device_health_score_settings_playbook_config_generator: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + dnac_log_level: "{{dnac_log_level}}" + state: gathered + file_path: "/tmp/deduped_device_health_score_settings.yml" + file_mode: overwrite + config: + component_specific_filters: + components_list: ["device_health_score_settings"] + device_health_score_settings: + - device_families: ["ROUTER", "UNIFIED_AP"] + - device_families: ["UNIFIED_AP"] # UNIFIED_AP is a duplicate and will be removed + +- name: > + Generate YAML Configuration showing that an entry without device_families + does NOT expand the filter to all families. + Only ROUTER and SWITCH_AND_HUB (from the first entry) are fetched. + The second entry omits device_families entirely but does not cause all + device families to be included - it is simply ignored for filtering + purposes. Result is identical to specifying only the first entry. + cisco.dnac.assurance_device_health_score_settings_playbook_config_generator: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + dnac_log_level: "{{dnac_log_level}}" + state: gathered + file_path: "/tmp/partial_omission_device_health_score_settings.yml" + file_mode: overwrite + config: + component_specific_filters: + components_list: ["device_health_score_settings"] + device_health_score_settings: + - device_families: ["ROUTER", "SWITCH_AND_HUB"] # only these two families are fetched + - {} # entry with no device_families key - does NOT add "all families" to the filter """ RETURN = r""" @@ -764,7 +832,8 @@ def validate_component_specific_filters(self, component_specific_filters): Args: component_specific_filters (dict): Component filters configuration containing: - components_list (list, optional): List of component names to process - - device_health_score_settings (dict, optional): Nested filters with: + - device_health_score_settings (list[dict], optional): List of filter + entries, each a dict with: - device_families (list, optional): Device families within settings Returns: @@ -924,88 +993,132 @@ def validate_component_specific_filters(self, component_specific_filters): self.set_operation_result("failed", False, self.msg, "ERROR") return False - # Validate device_health_score_settings if provided (nested structure) + # Validate device_health_score_settings if provided (list of dicts structure) if 'device_health_score_settings' in component_specific_filters: self.log( "device_health_score_settings parameter found in component_specific_filters. " - "Starting validation of nested device_health_score_settings structure. This " - "represents nested filtering configuration for device health score settings " - "component with component-specific filter parameters.", + "Starting validation as list of dicts structure. Each entry must be a dict " + "with an optional device_families key.", "DEBUG" ) device_health_score_settings = component_specific_filters['device_health_score_settings'] + + # Normalize None to empty list if device_health_score_settings is None: - device_health_score_settings = {} + device_health_score_settings = [] component_specific_filters["device_health_score_settings"] = device_health_score_settings - if not isinstance(device_health_score_settings, dict): + + if not isinstance(device_health_score_settings, list): self.msg = ( - "component_specific_filters.device_health_score_settings must be a " - "dictionary. Received type: {0}. Please provide device_health_score_settings " - "as dictionary structure with valid nested parameter keys and values." + "component_specific_filters.device_health_score_settings must be a list " + "of dictionaries. Received type: {0}. Example: device_health_score_settings: " + "[{{device_families: [ROUTER, UNIFIED_AP]}}]." ).format(type(device_health_score_settings).__name__) self.log(self.msg, "ERROR") self.set_operation_result("failed", False, self.msg, "ERROR") return False self.log( - "device_health_score_settings dictionary type validation passed. Proceeding with " - "nested parameter validation including device_families and other component-specific " - "filter options.", + "device_health_score_settings list type validation passed. Processing {0} " + "entries. Step 1: deduplicate whole entries via brownfield_helper, " + "Step 2: flatten and deduplicate device_families across remaining entries.".format( + len(device_health_score_settings) + ), "DEBUG" ) - # Validate device_families within device_health_score_settings - if 'device_families' in device_health_score_settings: - self.log( - "device_families parameter found within nested device_health_score_settings " - "structure. Starting validation using validate_device_families_parameter() " - "method to ensure proper list structure and string element types.", - "DEBUG" - ) - if not self.validate_device_families_parameter( - device_health_score_settings['device_families'] - ): - self.log( - "Nested device_families parameter validation failed. " - "validate_device_families_parameter() returned False. Operation result " - "already set to failed. Returning False to indicate validation failure.", - "ERROR" - ) - return False + # Step 1 — use brownfield_helper.deduplicate_component_filters() to remove + # exact duplicate dict entries (e.g. two identical {device_families: [ROUTER]} blocks). + # This modifies component_specific_filters["device_health_score_settings"] in-place. + self.deduplicate_component_filters(component_specific_filters) + device_health_score_settings = component_specific_filters['device_health_score_settings'] self.log( - "Nested device_families parameter validation passed successfully. Parameter " - "structure and values within device_health_score_settings conform to schema.", + "After brownfield_helper deduplication: {0} entries remain.".format( + len(device_health_score_settings) + ), "DEBUG" ) - # Check for invalid nested parameters + # Step 2 — validate each entry and flatten device_families across all entries, + # deduplicating individual family strings (preserving first-occurrence order). allowed_nested_params = {'device_families'} - self.log( - "Allowed nested parameters within device_health_score_settings defined: {0}. " - "Total allowed nested parameters: {1}. Checking for invalid or unrecognized " - "parameter names in nested structure.".format( - ", ".join(sorted(allowed_nested_params)), - len(allowed_nested_params) - ), - "DEBUG" - ) - invalid_nested_params = set(device_health_score_settings.keys()) - allowed_nested_params - if invalid_nested_params: - self.msg = ( - "Invalid device_health_score_settings parameter(s) found: {0}. These " - "nested parameter names are not recognized within device_health_score_settings " - "structure. Allowed parameters are: {1}. Please check for typos and ensure " - "only supported nested parameters are used." - ).format( - ", ".join(sorted(invalid_nested_params)), - ", ".join(sorted(allowed_nested_params)) + all_device_families = [] + seen_families = set() + + for entry_index, entry in enumerate(device_health_score_settings, start=1): + if not isinstance(entry, dict): + self.msg = ( + "Each entry in device_health_score_settings must be a dictionary. " + "Entry at index {0} has invalid type: {1}. Expected dict with optional " + "device_families key." + ).format(entry_index - 1, type(entry).__name__) + self.log(self.msg, "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Check for unrecognized keys in this entry + invalid_nested_params = set(entry.keys()) - allowed_nested_params + if invalid_nested_params: + self.msg = ( + "Invalid key(s) found in device_health_score_settings entry at index {0}: " + "{1}. Allowed keys per entry are: {2}." + ).format( + entry_index - 1, + ", ".join(sorted(invalid_nested_params)), + ", ".join(sorted(allowed_nested_params)) + ) + self.log(self.msg, "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Validate and collect device_families from this entry + if 'device_families' in entry: + self.log( + "Validating device_families in entry {0}/{1}: {2}.".format( + entry_index, len(device_health_score_settings), + entry['device_families'] + ), + "DEBUG" + ) + if not self.validate_device_families_parameter(entry['device_families']): + self.log( + "device_families validation failed at entry index {0}. " + "Returning False.".format(entry_index - 1), + "ERROR" + ) + return False + + # Flatten and deduplicate individual family names across all entries + for family in entry['device_families']: + if family not in seen_families: + seen_families.add(family) + all_device_families.append(family) + else: + self.log( + "Duplicate device family '{0}' found in entry {1}. " + "Skipping to preserve unique list.".format( + family, entry_index - 1 + ), + "WARNING" + ) + + if len(all_device_families) < sum( + len(e.get('device_families', [])) for e in device_health_score_settings if isinstance(e, dict) + ): + self.log( + "Duplicates removed across device_health_score_settings entries. " + "Final deduplicated device_families: {0}.".format(all_device_families), + "INFO" ) - self.log(self.msg, "ERROR") - self.set_operation_result("failed", False, self.msg, "ERROR") - return False + + # Normalize back to a single canonical dict for downstream use by + # get_device_health_score_settings() and apply_health_score_filters() + component_specific_filters["device_health_score_settings"] = { + "device_families": all_device_families + } self.log( - "device_health_score_settings nested parameter validation passed. All nested " - "parameters are recognized and allowed by module schema.", + "device_health_score_settings validation completed. Normalized to " + "device_families: {0}.".format(all_device_families), "DEBUG" ) @@ -1765,39 +1878,27 @@ def get_device_health_score_settings(self, network_element, filters): # Prepare API parameters api_params = {} - component_specific_filters = filters.get("component_specific_filters", {}) - device_families = [] + # brownfield_helper.yaml_config_generator passes csf.get("device_health_score_settings", {}) + # directly as filters["component_specific_filters"], which after validation normalization + # is {"device_families": [...]} or {}. No need for the two-level lookup anymore. + health_score_filters = filters.get("component_specific_filters") or {} + device_families = health_score_filters.get("device_families", []) - # Check for nested device_health_score_settings structure - health_score_filters = component_specific_filters.get("device_health_score_settings", {}) or {} - if health_score_filters.get("device_families"): - device_families = health_score_filters["device_families"] + if device_families: self.log( - "Found {0} device families in device_health_score_settings filters: {1}. " + "Found {0} device families in filters: {1}. " "Will execute separate API calls for each device family.".format( len(device_families), device_families ), "DEBUG" ) - # Check for components_list - if only components_list is present without device_families - components_list = component_specific_filters.get("components_list", []) - if "device_health_score_settings" in components_list: - if not device_families: - self.log( - "components_list contains device_health_score_settings without " - "device families filter. Will retrieve all device families from " - "Catalyst Center.", - "DEBUG" - ) - else: - self.log( - "components_list contains device_health_score_settings with {0} " - "device families: {1}.".format( - len(device_families), device_families - ), - "DEBUG" - ) + else: + self.log( + "No device families filter specified. Will retrieve all device " + "families from Catalyst Center.", + "DEBUG" + ) try: # Collect all response data from multiple API calls @@ -2038,11 +2139,6 @@ def get_device_health_score_settings(self, network_element, filters): ), "INFO" ) - final_result = { - "device_health_score_settings": device_health_score_list, - "operation_summary": self.get_operation_summary() - } - self.log( "Device health score settings retrieval completed successfully. " "Total configurations: {0}, Device families: {1}.".format( @@ -2051,7 +2147,10 @@ def get_device_health_score_settings(self, network_element, filters): ), "INFO" ) - return final_result + # Return as list so brownfield_helper.yaml_config_generator can call + # final_config_list.extend(component_data), producing: + # {"config": [{"device_health_score": [...]}]} + return [{"device_health_score": device_health_score_list}] else: self.log( @@ -2074,10 +2173,9 @@ def get_device_health_score_settings(self, network_element, filters): "error_code": "API_EXCEPTION_ERROR" }) - return { - "device_health_score_settings": [], - "operation_summary": self.get_operation_summary() - } + # Return empty list so brownfield_helper.yaml_config_generator sees no data + # and sets ok/False (no change) rather than attempting to write an empty file. + return [] def apply_health_score_filters(self, response_data, component_specific_filters): """ @@ -2221,470 +2319,6 @@ def apply_health_score_filters(self, response_data, component_specific_filters): ) return filtered_data - def yaml_config_generator(self, yaml_config_generator): - """ - Generates YAML configuration file for device health score settings. - - This function orchestrates the complete YAML generation workflow including file path - determination, filter processing, configuration retrieval from Catalyst Center, - data transformation using reverse mapping specifications, operation summary - consolidation, and YAML file generation with comprehensive header comments. - Supports auto-discovery mode and targeted filtering with detailed error handling. - - Args: - yaml_config_generator (dict): Configuration parameters containing: - - file_path (str, optional): Output file path - - file_mode (str, optional): Output file mode - - generate_all_configurations (bool): Internal auto-discovery mode - - component_specific_filters (dict, optional): Targeted extraction - - Returns: - object: Self instance with updated attributes: - - self.msg: Operation result message with file path and statistics - - self.status: Operation status ("success", "failed", or "ok") - - Operation result set via set_operation_result() - """ - self.log( - "Starting YAML configuration generation workflow with parameters: {0}. " - "Workflow orchestrates file path determination, filter processing, " - "configuration retrieval, operation summary consolidation, and YAML file " - "generation with header comments.".format(yaml_config_generator), - "DEBUG" - ) - - # Check if generate_all_configurations mode is enabled - generate_all = yaml_config_generator.get("generate_all_configurations", False) - self.log( - "Auto-discovery mode evaluation: generate_all_configurations={0}. When " - "enabled, overrides all filters to retrieve complete device health score " - "settings inventory from Catalyst Center for brownfield documentation.".format( - generate_all - ), - "DEBUG" - ) - if generate_all: - self.log( - "Auto-discovery mode enabled. Will process all device health score " - "settings without filtering restrictions for complete infrastructure " - "discovery and documentation.", - "INFO" - ) - - self.log( - "Determining output file path for YAML configuration. Checking if user " - "provided file_path parameter in top-level module input.", - "DEBUG" - ) - file_path = yaml_config_generator.get("file_path") - if not file_path: - self.log( - "No file_path provided in top-level module input. Generating default " - "filename with module name and timestamp for unique identification.", - "DEBUG" - ) - file_path = self.generate_filename() - self.log( - "Generated default filename: {0}. File will be created in current " - "working directory.".format(file_path), - "DEBUG" - ) - else: - self.log( - "Using user-provided file_path: {0}. Path may be absolute or relative " - "to current working directory.".format(file_path), - "DEBUG" - ) - - file_mode = yaml_config_generator.get("file_mode", "overwrite") - - self.log( - "YAML configuration output file path determined: {0}, file_mode: {1}. Path will be used " - "for writing final configuration with header comments.".format(file_path, file_mode), - "INFO" - ) - - if generate_all: - self.log( - "Auto-discovery mode active. Overriding any user-provided filters to " - "retrieve all device health score settings from Catalyst Center. " - "component_specific_filters will be set to empty dictionary.", - "INFO" - ) - component_specific_filters = {} - else: - self.log( - "Standard mode active. Processing user-provided filters for targeted " - "device health score settings retrieval using component_specific_filters.", - "DEBUG" - ) - - component_specific_filters = ( - yaml_config_generator.get("component_specific_filters") or {} - ) - - self.log( - "Component specific filters determined: {0}. Filters will be applied " - "during device health score settings retrieval for targeted configuration " - "extraction.".format(component_specific_filters), - "DEBUG" - ) - - self.log( - "Retrieving supported network elements schema configuration from module " - "schema definition. Schema contains API configuration, filter specifications, " - "and reverse mapping functions for each component.", - "DEBUG" - ) - module_supported_network_elements = self.module_schema.get("network_elements", {}) - - self.log( - "Initializing final configuration list and consolidated operation summary " - "tracking structures. These structures will accumulate configurations and " - "statistics from all processed components.", - "DEBUG" - ) - final_list = [] - consolidated_operation_summary = { - "total_device_families_processed": 0, - "total_kpis_processed": 0, - "total_successful_operations": 0, - "total_failed_operations": 0, - "device_families_with_complete_success": [], - "device_families_with_partial_success": [], - "device_families_with_complete_failure": [], - "success_details": [], - "failure_details": [] - } - - self.log( - "Tracking structures initialized successfully. final_list=[], " - "consolidated_operation_summary with zero counters ready for accumulation.", - "DEBUG" - ) - - # Process device health score settings - component = "device_health_score_settings" - self.log( - "Starting processing for component: {0}. Retrieving network element " - "configuration from module schema for API execution and data transformation.".format( - component - ), - "INFO" - ) - network_element = module_supported_network_elements.get(component) - - if network_element: - self.log( - "Network element configuration found for component {0}. Configuration " - "includes api_family={1}, api_function={2}, and reverse mapping function. " - "Preparing component-specific filter structure.".format( - component, - network_element.get("api_family"), - network_element.get("api_function") - ), - "DEBUG" - ) - - # Prepare component filters structure - self.log( - "Constructing component filter structure with component_specific_filters " - "for API execution. Structure format: {{'component_specific_filters': " - "{0}}}.".format(component_specific_filters), - "DEBUG" - ) - # Pass the component_specific_filters directly to match the expected structure - component_filters = { - "component_specific_filters": component_specific_filters - } - - self.log("Executing component operation function to retrieve details", "DEBUG") - operation_func = network_element.get("get_function_name") - details = operation_func(network_element, component_filters) - self.log( - "Component operation function execution completed for {0}. Retrieved " - "configurations count: {1}, operation_summary available: {2}.".format( - component, - len(details.get("device_health_score_settings", [])), - bool(details.get("operation_summary")) - ), - "INFO" - ) - - # Process retrieved configurations - if details and details.get("device_health_score_settings"): - config_count = len(details["device_health_score_settings"]) - - self.log( - "Adding {0} device health score configurations from component {1} " - "to final list. Configurations include device_family, kpi_name, " - "threshold_value, and other settings.".format( - config_count, component - ), - "DEBUG" - ) - - final_list.extend(details["device_health_score_settings"]) - - self.log( - "Successfully added configurations to final list. Total configurations " - "in final_list: {0}.".format(len(final_list)), - "DEBUG" - ) - else: - self.log( - "No device_health_score_settings configurations found in component " - "operation response for {0}. final_list remains unchanged.".format( - component - ), - "WARNING" - ) - - # Consolidate operation summary - if details and details.get("operation_summary"): - self.log( - "Consolidating operation summary from component {0} response. " - "Summary includes success/failure statistics and device family " - "categorization for comprehensive reporting.".format(component), - "DEBUG" - ) - summary = details["operation_summary"] - consolidated_operation_summary.update(summary) - self.log( - "Operation summary consolidated successfully. Statistics: " - "total_device_families_processed={0}, total_kpis_processed={1}, " - "total_successful_operations={2}, total_failed_operations={3}.".format( - consolidated_operation_summary["total_device_families_processed"], - consolidated_operation_summary["total_kpis_processed"], - consolidated_operation_summary["total_successful_operations"], - consolidated_operation_summary["total_failed_operations"] - ), - "DEBUG" - ) - else: - self.log( - "No operation_summary available in component response for {0}. " - "Consolidated summary retains initial zero values.".format(component), - "DEBUG" - ) - else: - self.log( - "Network element configuration not found for component {0} in module " - "schema. Component will be skipped in processing workflow.".format( - component - ), - "ERROR" - ) - - self.log( - "Creating final dictionary structure for YAML output. Structure follows " - "assurance_device_health_score_settings_workflow_manager expected format " - "with config list containing device_health_score configurations.", - "DEBUG" - ) - final_dict = OrderedDict() - - # Format the configuration properly according to the required structure - # Changed to match expected format: config: - device_health_score: [list] - if final_list: - self.log( - "Formatting {0} configurations into expected YAML structure: config -> " - "list with device_health_score key. Structure matches module input " - "requirements.".format(len(final_list)), - "DEBUG" - ) - final_dict["config"] = [{"device_health_score": final_list}] - else: - self.log( - "No configurations available in final_list. Creating empty YAML " - "structure: config -> list with empty device_health_score array.", - "WARNING" - ) - final_dict["config"] = [{"device_health_score": []}] - - if not final_list: - self.log( - "No configurations found to process after component retrieval. Setting " - "appropriate result message indicating no data available for specified " - "filters or auto-discovery mode.", - "WARNING" - ) - - self.msg = { - "message": ( - "No configurations or components to process for module '{0}'. " - "Verify input filters or configuration.".format(self.module_name) - ), - "file_path": file_path, - "operation_summary": consolidated_operation_summary - } - - self.log( - "Setting operation result to 'ok' status for empty configuration " - "scenario. Result message: {0}".format(self.msg["message"]), - "INFO" - ) - - self.set_operation_result("ok", False, self.msg, "INFO") - return self - else: - self.log( - "YAML file write operation failed. Unable to write configuration to " - "file path: {0}. Check file permissions, directory existence, and disk " - "space availability.".format(file_path), - "ERROR" - ) - - self.msg = { - "message": ( - "YAML config generation failed for module '{0}' - unable to write " - "to file.".format(self.module_name) - ), - "file_path": file_path, - "operation_summary": consolidated_operation_summary - } - - self.log( - "Setting operation result to 'failed' with changed=True due to file " - "write failure. Message: {0}".format(self.msg["message"]), - "ERROR" - ) - - self.set_operation_result("failed", True, self.msg, "ERROR") - - self.log( - "Final dictionary structure created successfully with {0} total " - "configurations. Dictionary ready for YAML serialization with header " - "comments.".format(len(final_list)), - "INFO" - ) - - # Determine if operation should be considered failed based on partial or complete failures - has_partial_failures = len(consolidated_operation_summary["device_families_with_partial_success"]) > 0 - has_complete_failures = len(consolidated_operation_summary["device_families_with_complete_failure"]) > 0 - has_any_failures = consolidated_operation_summary["total_failed_operations"] > 0 - - self.log( - "Evaluating operation status for failure detection. Partial failures: {0}, " - "Complete failures: {1}, Total failed operations: {2}. Status determination " - "will affect final result reporting.".format( - has_partial_failures, has_complete_failures, - consolidated_operation_summary["total_failed_operations"] - ), - "DEBUG" - ) - - # Write YAML file with header - self.log( - "Initiating YAML file write operation to path: {0}. Operation includes " - "header comment generation with metadata and configuration summary, followed " - "by YAML serialization of final_dict structure.".format(file_path), - "INFO" - ) - - self.log("Attempting to write final dictionary to YAML file", "DEBUG") - if self.write_dict_to_yaml(final_dict, file_path, file_mode): - self.log( - "YAML file write operation completed successfully. File created at: {0} " - "with {1} configurations and header comments.".format( - file_path, len(final_list) - ), - "INFO" - ) - self.log( - "YAML file write operation completed successfully. File created at: {0} " - "with {1} configurations and header comments.".format( - file_path, len(final_list) - ), - "INFO" - ) - - # Determine final operation status - if has_partial_failures or has_complete_failures or has_any_failures: - self.log( - "Operation contains failures detected. Setting final status to " - "'failed' for comprehensive error reporting. Partial failures: {0}, " - "Complete failures: {1}, Total failures: {2}.".format( - has_partial_failures, has_complete_failures, has_any_failures - ), - "WARNING" - ) - - self.msg = { - "message": ( - "YAML config generation completed with failures for module " - "'{0}'. Check operation_summary for details.".format( - self.module_name - ) - ), - "file_path": file_path, - "configurations_generated": len(final_list), - "operation_summary": consolidated_operation_summary - } - - self.log( - "Setting operation result to 'failed' with changed=True. Message: " - "{0}. Users should review operation_summary for failure details.".format( - self.msg["message"] - ), - "ERROR" - ) - self.set_operation_result("failed", True, self.msg, "ERROR") - else: - self.log( - "Setting operation result to 'success' with changed=True. Generated " - "YAML file contains {0} configurations at {1}.".format( - len(final_list), file_path - ), - "INFO" - ) - self.msg = { - "message": "YAML config generation succeeded for module '{0}'.".format(self.module_name), - "file_path": file_path, - "configurations_generated": len(final_list), - "operation_summary": consolidated_operation_summary - } - self.set_operation_result("success", True, self.msg, "INFO") - else: - self.log( - "Operation completed successfully without failures. All {0} device " - "families processed successfully with {1} total KPI configurations.".format( - consolidated_operation_summary["total_device_families_processed"], - consolidated_operation_summary["total_kpis_processed"] - ), - "INFO" - ) - - self.msg = { - "message": ( - "YAML config generation succeeded for module '{0}'.".format( - self.module_name - ) - ), - "file_path": file_path, - "configurations_generated": len(final_list), - "operation_summary": consolidated_operation_summary - } - - self.log( - "Setting operation result to 'success' with changed=True. Generated " - "YAML file contains {0} configurations at {1}.".format( - len(final_list), file_path - ), - "INFO" - ) - self.set_operation_result("failed", True, self.msg, "ERROR") - - self.log( - "YAML configuration generation workflow completed. Final status: {0}, " - "Configurations generated: {1}, File path: {2}.".format( - "success" if not (has_partial_failures or has_complete_failures or - has_any_failures) and len(final_list) > 0 else "failed", - len(final_list), file_path - ), - "INFO" - ) - return self - def get_want(self, config, state): """ Prepares API call parameters based on playbook configuration and state. diff --git a/plugins/modules/assurance_issue_playbook_config_generator.py b/plugins/modules/assurance_issue_playbook_config_generator.py index 6843036dea..a306f07536 100644 --- a/plugins/modules/assurance_issue_playbook_config_generator.py +++ b/plugins/modules/assurance_issue_playbook_config_generator.py @@ -380,6 +380,92 @@ def validate_input(self): self.status = "failed" return self.check_return_status() + # Normalize duplicate components while preserving order. + if isinstance(components_list, list): + self.log( + "Normalizing components_list with {0} candidate entries.".format( + len(components_list) + ), + "DEBUG" + ) + deduplicated_components_list = [] + seen_components = set() + for component_index, component_name in enumerate(components_list, start=1): + if component_name in seen_components: + self.log( + "Skipping duplicate components_list entry at index {0}: {1}".format( + component_index, component_name + ), + "DEBUG" + ) + continue + + seen_components.add(component_name) + deduplicated_components_list.append(component_name) + + if len(deduplicated_components_list) != len(components_list): + self.log( + ( + "Removing duplicate entries from components_list: " + "{0} entries reduced to {1} unique entries to avoid " + "redundant processing." + ).format( + len(components_list), len(deduplicated_components_list) + ), + "INFO" + ) + component_filters["components_list"] = deduplicated_components_list + valid_temp["component_specific_filters"] = component_filters + + # Normalize duplicate issue filter blocks while preserving order. + issue_filters = component_filters.get("assurance_user_defined_issue_settings") + if isinstance(issue_filters, list): + self.log( + "Normalizing assurance_user_defined_issue_settings filters with {0} candidate entries.".format( + len(issue_filters) + ), + "DEBUG" + ) + deduplicated_issue_filters = [] + seen_filter_keys = set() + for filter_index, item in enumerate(issue_filters, start=1): + if not isinstance(item, dict): + self.log( + "Retaining non-dict filter at index {0}: {1}".format(filter_index, item), + "DEBUG" + ) + deduplicated_issue_filters.append(item) + continue + + filter_key = ( + item.get("name"), + item.get("is_enabled") + ) + if filter_key in seen_filter_keys: + self.log( + "Skipping duplicate assurance_user_defined_issue_settings filter at index {0} with key {1}".format( + filter_index, filter_key + ), + "DEBUG" + ) + continue + + seen_filter_keys.add(filter_key) + deduplicated_issue_filters.append(item) + if len(deduplicated_issue_filters) != len(issue_filters): + self.log( + ( + "Deduplicated assurance_user_defined_issue_settings " + "filters from {0} to {1} entries to prevent repeated " + "API calls for the same filter combination." + ).format( + len(issue_filters), len(deduplicated_issue_filters) + ), + "INFO" + ) + component_filters["assurance_user_defined_issue_settings"] = deduplicated_issue_filters + valid_temp["component_specific_filters"] = component_filters + # Set the validated configuration and update the result with success status self.validated_config = valid_temp self.msg = "Successfully validated playbook configuration parameters using 'validated_input': {0}".format( @@ -1026,6 +1112,53 @@ def get_user_defined_issues(self, issue_element, filters): else: component_specific_filters = [] + # Normalize duplicate component filter blocks to avoid repeated API calls. + if isinstance(component_specific_filters, list): + self.log( + "Normalizing component-specific user issue filters with {0} candidate entries.".format( + len(component_specific_filters) + ), + "DEBUG" + ) + deduplicated_filters = [] + seen_filter_keys = set() + for filter_index, item in enumerate(component_specific_filters, start=1): + if not isinstance(item, dict): + self.log( + "Retaining non-dict component-specific filter at index {0}: {1}".format( + filter_index, item + ), + "DEBUG" + ) + deduplicated_filters.append(item) + self.log("Removed {0} duplicate filter entries in component-specific filters before API calls.".format( + len(component_specific_filters) - len(deduplicated_filters) + ), "INFO") + continue + + filter_key = (item.get("name"), item.get("is_enabled")) + if filter_key in seen_filter_keys: + self.log( + "Skipping duplicate component-specific filter at index {0} with key {1}".format( + filter_index, filter_key + ), + "DEBUG" + ) + continue + + seen_filter_keys.add(filter_key) + deduplicated_filters.append(item) + + if len(deduplicated_filters) != len(component_specific_filters): + self.log( + "Deduplicated component-specific filters from {0} to {1} entries.".format( + len(component_specific_filters), len(deduplicated_filters) + ), + "INFO" + ) + + component_specific_filters = deduplicated_filters + self.log( "Component-specific filters count: {0}".format(len(component_specific_filters)), "DEBUG" @@ -1080,6 +1213,54 @@ def get_user_defined_issues(self, issue_element, filters): self._fetch_all_priority_enabled_combinations(api_family, api_function) ) + # Deduplicate merged issue entries (same issue can be returned across repeated filters). + deduplicated_user_issues = [] + seen_issues = set() + self.log( + "Deduplicated final issue results from {0} to {1} entries to remove cross-filter duplicates.".format( + len(final_user_issues), len(deduplicated_user_issues) + ), + "DEBUG" + ) + for issue_index, issue in enumerate(final_user_issues, start=1): + if not isinstance(issue, dict): + self.log( + "Retaining non-dict merged issue entry at index {0}: {1}".format( + issue_index, issue + ), + "DEBUG" + ) + deduplicated_user_issues.append(issue) + continue + + issue_key = ( + issue.get("name"), + issue.get("isEnabled"), + issue.get("priority") + ) + if issue_key in seen_issues: + self.log( + "Skipping duplicate merged issue entry at index {0} with key {1}".format( + issue_index, issue_key + ), + "DEBUG" + ) + continue + + seen_issues.add(issue_key) + deduplicated_user_issues.append(issue) + + if len(deduplicated_user_issues) != len(final_user_issues): + self.log( + "Deduplicated merged issue entries from {0} to {1}.".format( + len(final_user_issues), len(deduplicated_user_issues) + ), + "INFO" + ) + else: + self.log("No duplicate merged issue entries found.", "DEBUG") + final_user_issues = deduplicated_user_issues + # Track success self.add_success("assurance_user_defined_issue_settings", { "issues_processed": len(final_user_issues) @@ -1334,6 +1515,10 @@ def get_diff_gathered(self): component_filters = config.get("component_specific_filters", {}) or {} components_list = component_filters.get("components_list", []) + # Safety normalization to avoid duplicate processing in gathered flow. + if isinstance(components_list, list): + components_list = list(dict.fromkeys(components_list)) + # Validate components_list to check for unexpected components if components_list: expected_components = ["assurance_user_defined_issue_settings"] @@ -1431,6 +1616,21 @@ def get_diff_gathered(self): ) all_configs.append({component_name: component_data}) + # If nothing matched, do not generate/write any output file. + if not all_configs: + self.msg = ( + "No configurations found for module '{0}' with the provided filters. " + "No output file was generated." + ).format(self.module_name) + self.result["changed"] = False + self.result["response"] = { + "message": self.msg, + "configurations_generated": 0 + } + self.result["msg"] = self.msg + self.status = "success" + return self + # Generate final YAML structure yaml_config = {} @@ -1467,14 +1667,6 @@ def get_diff_gathered(self): final_list.append(config_item) yaml_config = {"config": final_list} - else: - # Generate empty template structure when no configurations found and not in generate_all mode - final_list = [] - issue_elements = self.module_schema.get("issue_elements", {}) - for component_name in issue_elements.keys(): - component_dict = {component_name: []} - final_list.append(component_dict) - yaml_config = {"config": final_list} # Write to YAML file with header comments if yaml_config: @@ -1485,10 +1677,8 @@ def get_diff_gathered(self): ) success = self.write_dict_to_yaml(yaml_config, file_path, file_mode) if success: - if all_configs: - self.msg = "YAML config generation succeeded for module '{0}'.".format(self.module_name) - else: - self.msg = "YAML config generation completed for module '{0}' with empty template (no configurations found).".format(self.module_name) + self.msg = "YAML config generation succeeded for module '{0}'.".format(self.module_name) + self.result["changed"] = True self.result["response"] = { "message": self.msg, "file_path": file_path, @@ -1499,6 +1689,7 @@ def get_diff_gathered(self): self.status = "success" else: self.msg = "Failed to write YAML configuration to file: {0}".format(file_path) + self.result["changed"] = False self.result["response"] = {"message": self.msg} self.result["msg"] = self.msg self.status = "failed" @@ -1506,6 +1697,7 @@ def get_diff_gathered(self): operation_summary = self.get_operation_summary() self.msg = "No configurations or components to process for module '{0}'. Verify input filters or configuration.".format( self.module_name) + self.result["changed"] = False self.result["response"] = { "message": self.msg, "operation_summary": operation_summary diff --git a/plugins/modules/events_and_notifications_playbook_config_generator.py b/plugins/modules/events_and_notifications_playbook_config_generator.py index 738eb7b218..3751056403 100644 --- a/plugins/modules/events_and_notifications_playbook_config_generator.py +++ b/plugins/modules/events_and_notifications_playbook_config_generator.py @@ -152,6 +152,11 @@ description: - List of exact destination names to filter from retrieved configurations. + - When C(destination_names) is provided, you must target at + least one destination component using either + C(destination_types) or C(components_list) with one or more + of C(webhook_destinations), C(email_destinations), + C(syslog_destinations), C(snmp_destinations). - Names must match exactly as configured in Catalyst Center (case-sensitive). - Only components listed in components_list are retrieved. @@ -212,6 +217,11 @@ description: - List of exact event subscription names to filter from retrieved configurations. + - When C(subscription_names) is provided, you must target at + least one notification component using either + C(notification_types) or C(components_list) with one or more + of C(webhook_event_notifications), + C(email_event_notifications), C(syslog_event_notifications). - Names must match exactly as configured in Catalyst Center event subscriptions. - Filters webhook, email, and syslog event notifications based @@ -285,16 +295,16 @@ - event_management.Events.get_event_artifacts - sites.Sites.get_site - Paths used are - - GET /dna/system/api/v1/event/webhook - - GET /dna/system/api/v1/event/email-config - - GET /dna/system/api/v1/event/syslog-config - - GET /dna/system/api/v1/event/snmp-config - - GET /dna/system/api/v1/event/itsm-integration-setting - - GET /dna/system/api/v1/event/subscription/rest - - GET /dna/system/api/v1/event/subscription/email - - GET /dna/system/api/v1/event/subscription/syslog - - GET /dna/intent/api/v1/event-artifact - - GET /dna/intent/api/v1/site + - GET /dna/intent/api/v1/event/webhook + - GET /dna/intent/api/v1/event/email-config + - GET /dna/intent/api/v1/event/syslog-config + - GET /dna/intent/api/v1/dna-event/snmp-config + - GET /dna/intent/api/v1/integration-settings/itsm/instances + - GET /dna/intent/api/v1/event/subscription/rest + - GET /dna/intent/api/v1/event/subscription/email + - GET /dna/intent/api/v1/event/subscription/syslog + - GET /dna/system/api/v1/event/artifact + - GET /dna/intent/api/v1/sites - Minimum Catalyst Center version required is 2.3.5.3 for events and notifications APIs. - Module performs read-only operations and does not modify Catalyst Center @@ -305,8 +315,8 @@ readability. - Event IDs are automatically resolved to event names using Event Artifacts API. -- Pagination is automatically handled for large datasets in webhook, SNMP, - and event subscriptions. +- Pagination is automatically handled for large datasets in webhook, syslog, + SNMP destinations, ITSM settings and webhook, email, syslog notifications. - Generated playbooks are compatible with events_and_notifications_workflow_manager module. - When filter blocks (C(destination_filters), C(notification_filters), @@ -417,6 +427,60 @@ component_specific_filters: itsm_filters: instance_names: ["ServiceNow Instance 1", "BMC Remedy Prod"] + +# destination_names with destination_types (new filter in isolation) +- name: Generate config filtering specific destinations by name and type + cisco.dnac.events_and_notifications_playbook_config_generator: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + component_specific_filters: + destination_filters: + destination_names: + - "Scale Syslog 7" + - "Prod Webhook Endpoint" + destination_types: # Required when destination_names is specified + - "syslog" + - "webhook" + +# subscription_names with notification_types (new filter in isolation) +- name: Generate config filtering specific subscriptions by name and type + cisco.dnac.events_and_notifications_playbook_config_generator: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + component_specific_filters: + notification_filters: + subscription_names: + - "Critical Email Alerts" + - "Syslog Infra Events" + notification_types: # Required when subscription_names is specified + - "email" + - "syslog" + +# Combined filters with components_list as the targeting mechanism (multi-entry, mixed) +- name: Generate config with name filters targeted via components_list + cisco.dnac.events_and_notifications_playbook_config_generator: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + component_specific_filters: + components_list: + - "syslog_destinations" # Targets destination_names filter + - "email_event_notifications" # Targets subscription_names filter + - "itsm_settings" # No name filter, retrieves all + destination_filters: + destination_names: + - "Scale Syslog 7" # Filtered — only syslog destinations matched + notification_filters: + subscription_names: + - "Critical Email Alerts" # Filtered — only email notifications matched + itsm_filters: + instance_names: + - "ServiceNow Prod" """ RETURN = r""" @@ -751,14 +815,24 @@ def validate_input(self): component_filters = validated_config.get("component_specific_filters") if config_provided and component_filters is None: - self.msg = ( - "Validation Error: 'component_specific_filters' is mandatory when " - "'config' is provided. Please provide " - "'config.component_specific_filters' with either " - "'components_list' or component filter blocks." + if "component_specific_filters" not in self.config: + self.msg = ( + "Validation Error: 'component_specific_filters' is mandatory when " + "'config' is provided. Please provide " + "'config.component_specific_filters' with either " + "'components_list' or component filter blocks." + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + self.log( + "'component_specific_filters' key is present but value is null/empty under " + "'config'. Normalizing to empty dict to enforce standard minimum requirement " + "validation for components_list/component filters.", + "DEBUG" ) - self.set_operation_result("failed", False, self.msg, "ERROR") - return self + component_filters = {} + validated_config["component_specific_filters"] = component_filters self.log( "Schema validation completed successfully. Validated configuration: {0}".format( @@ -776,7 +850,7 @@ def validate_input(self): ) # Validate nested component_specific_filters structure - if component_filters and isinstance(component_filters, dict): + if isinstance(component_filters, dict): self.log( "Validating nested component_specific_filters keys: {0}".format( list(component_filters.keys()) @@ -862,6 +936,47 @@ def validate_input(self): self.set_operation_result("failed", False, self.msg, "ERROR") return self + destination_names = destination_filters.get("destination_names") + if destination_names and isinstance(destination_names, list): + self.log("Validating destination name filters against targeted components.", "DEBUG") + destination_component_map = { + "webhook": "webhook_destinations", + "email": "email_destinations", + "syslog": "syslog_destinations", + "snmp": "snmp_destinations", + } + destination_components = set(destination_component_map.values()) + self.log("Destination component mapping: {0}".format(destination_component_map), "DEBUG") + + targeted_components_from_types = set() + if destination_types: + self.log("Determining targeted components from destination_types filter: {0}".format(destination_types), "DEBUG") + targeted_components_from_types = { + destination_component_map[dest_type] + for dest_type in destination_types + if dest_type in destination_component_map + } + + targeted_components_from_list = set() + if components_list: + self.log("Determining targeted components from components_list filter: {0}".format(components_list), "DEBUG") + targeted_components_from_list = { + component_name + for component_name in components_list + if component_name in destination_components + } + + if not (targeted_components_from_types or targeted_components_from_list): + self.msg = ( + "Validation Error: 'destination_filters.destination_names' requires " + "at least one destination target. Provide " + "'destination_filters.destination_types' (webhook/email/syslog/snmp) " + "or include destination component(s) in " + "'component_specific_filters.components_list'." + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + # Validate notification_filters allowed_notification_filter_keys = {"subscription_names", "notification_types"} notification_filters = component_filters.get("notification_filters") @@ -895,6 +1010,48 @@ def validate_input(self): self.set_operation_result("failed", False, self.msg, "ERROR") return self + subscription_names = notification_filters.get("subscription_names") + if subscription_names and isinstance(subscription_names, list): + self.log("Validating subscription name filters against targeted components.", "DEBUG") + notification_component_map = { + "webhook": "webhook_event_notifications", + "email": "email_event_notifications", + "syslog": "syslog_event_notifications", + } + notification_components = set(notification_component_map.values()) + + targeted_notification_components_from_types = set() + if notification_types: + self.log("Determining targeted components from notification_types filter: {0}".format(notification_types), "DEBUG") + targeted_notification_components_from_types = { + notification_component_map[notif_type] + for notif_type in notification_types + if notif_type in notification_component_map + } + + targeted_notification_components_from_list = set() + if components_list: + self.log("Determining targeted components from components_list filter: {0}".format(components_list), "DEBUG") + targeted_notification_components_from_list = { + component_name + for component_name in components_list + if component_name in notification_components + } + + if not ( + targeted_notification_components_from_types + or targeted_notification_components_from_list + ): + self.msg = ( + "Validation Error: 'notification_filters.subscription_names' requires " + "at least one notification target. Provide " + "'notification_filters.notification_types' (webhook/email/syslog) " + "or include notification component(s) in " + "'component_specific_filters.components_list'." + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + # Validate itsm_filters allowed_itsm_filter_keys = {"instance_names"} itsm_filters = component_filters.get("itsm_filters") @@ -932,6 +1089,7 @@ def validate_input(self): inferred_components.update( [destination_type_map[dt] for dt in destination_types] ) + else: if not components_list: inferred_components.update( @@ -1741,6 +1899,8 @@ def extract_sites_from_filter(self, notification): sites_from_resource = 0 try: + has_explicit_filter_sites = False + # Check filter for direct sites filter_data = notification.get("filter", {}) if isinstance(filter_data, dict): @@ -1753,6 +1913,7 @@ def extract_sites_from_filter(self, notification): if direct_sites: sites.extend(direct_sites) sites_from_direct = len(direct_sites) + has_explicit_filter_sites = True self.log( "Extracted {0} direct site name(s) from filter.sites: {1}".format( @@ -1764,6 +1925,7 @@ def extract_sites_from_filter(self, notification): # Site IDs in filter - need to resolve to names site_ids = filter_data.get("siteIds", []) if site_ids: + has_explicit_filter_sites = True self.log( "Found {0} site ID(s) requiring resolution: {1}. Calling site API " "to resolve IDs to hierarchical names.".format(len(site_ids), site_ids), @@ -1814,7 +1976,7 @@ def extract_sites_from_filter(self, notification): # Check resourceDomain for site information resource_domain = notification.get("resourceDomain", {}) - if resource_domain: + if resource_domain and not has_explicit_filter_sites: resource_groups = resource_domain.get("resourceGroups", []) self.log( "Processing resource domain with {0} resource group(s). Extracting " @@ -1868,6 +2030,13 @@ def extract_sites_from_filter(self, notification): "DEBUG" ) + elif resource_domain and has_explicit_filter_sites: + self.log( + "Skipping resource domain site extraction because explicit filter sites " + "(sites/siteIds) are already provided in notification filter.", + "DEBUG" + ) + self.log( "Resource domain processing completed. Sites from resource groups: {0}".format( sites_from_resource @@ -3175,12 +3344,11 @@ def get_all_webhook_destinations(self, api_family, api_function): while True: page_count += 1 - current_offset = offset * limit self.log( "Fetching webhook destinations page {0} with offset={1}, limit={2}. " "Calling API to retrieve webhook configurations.".format( - page_count, current_offset, limit + page_count, offset, limit ), "DEBUG" ) @@ -3188,7 +3356,7 @@ def get_all_webhook_destinations(self, api_family, api_function): family=api_family, function=api_function, op_modifies=False, - params={"offset": offset * limit, "limit": limit}, + params={"offset": offset, "limit": limit}, ) self.log( "Received API response for webhook destinations page {0}. Response " @@ -3228,7 +3396,7 @@ def get_all_webhook_destinations(self, api_family, api_function): ) break - offset += 1 + offset += limit self.log( "Webhook destination retrieval completed successfully. Total pages " "fetched: {0}, Total webhooks retrieved: {1}. Returning complete " @@ -3317,63 +3485,109 @@ def get_all_email_destinations(self, api_family, api_function): def get_all_syslog_destinations(self, api_family, api_function): """ - Retrieves all syslog destinations from the API. + Retrieves all syslog destinations using pagination from the API. Description: - This helper method fetches syslog destination configurations from Cisco Catalyst Center. - It extracts syslog configuration data from the API response and handles various - response formats to ensure consistent data retrieval. + This helper method makes paginated API calls to fetch all syslog destination + configurations from Cisco Catalyst Center. It handles API response variations + and continues pagination until all destinations are retrieved. Args: api_family (str): The API family identifier for syslog destinations. api_function (str): The specific API function name for retrieving syslog destinations. Returns: - list: A list of syslog destination dictionaries containing server addresses, - protocols, ports, and other syslog configuration parameters. + list: A list of syslog destination dictionaries containing all available + syslog configurations from the Cisco Catalyst Center. """ self.log( - "Retrieving all syslog destinations from Catalyst Center. API family: {0}, " - "API function: {1}. Calling API to fetch syslog configurations.".format( + "Retrieving all syslog destinations with pagination. API family: {0}, " + "API function: {1}. Starting pagination loop with limit=10.".format( api_family, api_function ), "DEBUG" ) try: - response = self.dnac._exec( - family=api_family, - function=api_function, - op_modifies=False, - params={}, - ) - self.log( - "Received API response for syslog destinations. Response type: {0}. " - "Processing statusMessage field to extract syslog configuration data.".format( - type(response).__name__ - ), - "DEBUG" - ) + offset = 0 + limit = 10 + all_syslogs = [] + page_count = 0 - syslog_configs = response.get("statusMessage", []) + while True: + page_count += 1 - if isinstance(syslog_configs, list): self.log( - "Extracted {0} syslog destination(s) from statusMessage field. " - "Returning syslog configurations for processing.".format( - len(syslog_configs) + "Fetching syslog destinations page {0} with offset={1}, limit={2}. " + "Calling API to retrieve syslog configurations.".format( + page_count, offset, limit ), - "INFO" + "DEBUG" + ) + + response = self.dnac._exec( + family=api_family, + function=api_function, + op_modifies=False, + params={"offset": offset, "limit": limit}, ) - return syslog_configs - else: self.log( - "statusMessage field has unexpected format. Expected list, got: {0}. " - "Returning empty list for graceful handling.".format( - type(syslog_configs).__name__ + "Received API response for syslog destinations page {0}. Response " + "type: {1}. Processing statusMessage field for syslog data.".format( + page_count, type(response).__name__ ), - "WARNING" + "DEBUG" ) - return [] + + syslog_configs = response.get("statusMessage", []) + if not isinstance(syslog_configs, list): + self.log( + "statusMessage has unexpected format in page {0}. Expected list, " + "got: {1}. Terminating pagination.".format( + page_count, type(syslog_configs).__name__ + ), + "WARNING" + ) + break + + if not syslog_configs: + self.log( + "No syslog destinations found in page {0} response. statusMessage " + "field empty or missing. Terminating pagination loop.".format( + page_count + ), + "DEBUG" + ) + break + + all_syslogs.extend(syslog_configs) + self.log( + "Added {0} syslog destination(s) from page {1}. Total accumulated: " + "{2}. Checking if more pages available.".format( + len(syslog_configs), page_count, len(all_syslogs) + ), + "DEBUG" + ) + + if len(syslog_configs) < limit: + self.log( + "Received {0} syslog destination(s) in page {1}, which is less " + "than limit {2}. No more pages available. Terminating pagination.".format( + len(syslog_configs), page_count, limit + ), + "DEBUG" + ) + break + + offset += limit + + self.log( + "Syslog destination retrieval completed successfully. Total pages fetched: " + "{0}, Total syslog destinations retrieved: {1}. Returning complete syslog list.".format( + page_count, len(all_syslogs) + ), + "INFO" + ) + return all_syslogs except Exception as e: self.log( @@ -3417,12 +3631,11 @@ def get_all_snmp_destinations(self, api_family, api_function): while True: page_count += 1 - current_offset = offset * limit self.log( "Fetching SNMP destinations page {0} with offset={1}, limit={2}. " "Calling API to retrieve SNMP configurations.".format( - page_count, current_offset, limit + page_count, offset, limit ), "DEBUG" ) @@ -3431,7 +3644,7 @@ def get_all_snmp_destinations(self, api_family, api_function): family=api_family, function=api_function, op_modifies=False, - params={"offset": offset * limit, "limit": limit}, + params={"offset": offset, "limit": limit}, ) self.log( "Received API response for SNMP destinations page {0}. Response " @@ -3470,7 +3683,7 @@ def get_all_snmp_destinations(self, api_family, api_function): ) break - offset += 1 + offset += limit except Exception as e: self.log( @@ -3633,57 +3846,96 @@ def get_all_itsm_settings(self, api_family, api_function): descriptions, and connection configuration details. """ self.log( - "Retrieving all ITSM settings from Catalyst Center. API family: {0}, " - "API function: {1}. Calling API to fetch ITSM configurations.".format( + "Retrieving all ITSM settings with pagination. API family: {0}, API " + "function: {1}. Starting pagination with page_size=50.".format( api_family, api_function ), "DEBUG" ) try: - response = self.dnac._exec( - family=api_family, - function=api_function, - op_modifies=False, - ) - self.log( - "Received API response for ITSM settings. Response {0}. " - "Processing response structure to extract ITSM configuration data.".format( - response - ), - "DEBUG" - ) + page = 1 + page_size = 50 + all_itsm_settings = [] + page_count = 0 - itsm_settings = [] + while True: + page_count += 1 + self.log( + "Fetching ITSM instances page {0} with page={1}, page_size={2}, " + "sortBy=name, order=asc.".format(page_count, page, page_size), + "DEBUG" + ) + response = self.dnac._exec( + family=api_family, + function=api_function, + op_modifies=False, + params={ + "page_size": page_size, + "page": page, + "sortBy": "name", + "order": "asc", + }, + ) - if isinstance(response, dict): - itsm_settings = response.get("data") or response.get("response", []) - elif isinstance(response, list): - itsm_settings = response + itsm_settings = [] + if isinstance(response, dict): + itsm_settings = response.get("data") or response.get("response", []) + elif isinstance(response, list): + itsm_settings = response - if not isinstance(itsm_settings, list): + if not isinstance(itsm_settings, list): + self.log( + "ITSM listing response format unexpected in page {0}. Expected list, " + "got: {1}. Terminating pagination.".format( + page_count, type(itsm_settings).__name__ + ), + "WARNING" + ) + break + + if not itsm_settings: + self.log( + "No ITSM instances returned in page {0}. Terminating pagination loop.".format( + page_count + ), + "DEBUG" + ) + break + + all_itsm_settings.extend(itsm_settings) self.log( - "ITSM settings has unexpected format. Expected list, got: {0}. " - "Returning empty list for graceful handling.".format( - type(itsm_settings).__name__ + "Added {0} ITSM instance(s) from page {1}. Total accumulated: {2}.".format( + len(itsm_settings), page_count, len(all_itsm_settings) ), - "WARNING" + "DEBUG" ) - return [] + + if len(itsm_settings) < page_size: + self.log( + "Received {0} ITSM instance(s) in page {1}, less than page_size {2}. " + "No more pages available.".format( + len(itsm_settings), page_count, page_size + ), + "DEBUG" + ) + break + + page += 1 self.log( "Extracted {0} ITSM setting(s) from listing API. Now retrieving full " "details for each instance using get_itsm_integration_setting_by_id.".format( - len(itsm_settings) + len(all_itsm_settings) ), "INFO" ) detailed_settings = [] - for idx, item in enumerate(itsm_settings, start=1): + for idx, item in enumerate(all_itsm_settings, start=1): if not isinstance(item, dict): self.log( "Skipping ITSM entry {0}/{1} - not a valid dictionary.".format( - idx, len(itsm_settings) + idx, len(all_itsm_settings) ), "WARNING" ) @@ -3695,7 +3947,7 @@ def get_all_itsm_settings(self, api_family, api_function): if not instance_id: self.log( "Skipping ITSM instance {0}/{1} '{2}' - no 'id' field found in " - "listing response.".format(idx, len(itsm_settings), instance_name), + "listing response.".format(idx, len(all_itsm_settings), instance_name), "WARNING" ) continue @@ -3703,7 +3955,7 @@ def get_all_itsm_settings(self, api_family, api_function): self.log( "Processing ITSM instance {0}/{1} - name: '{2}', ID: '{3}'. " "Fetching full details.".format( - idx, len(itsm_settings), instance_name, instance_id + idx, len(all_itsm_settings), instance_name, instance_id ), "DEBUG" ) @@ -3713,22 +3965,23 @@ def get_all_itsm_settings(self, api_family, api_function): self.log( "Could not retrieve full details for ITSM instance {0}/{1} '{2}' " "(ID: {3}). Falling back to listing data.".format( - idx, len(itsm_settings), instance_name, instance_id + idx, len(all_itsm_settings), instance_name, instance_id ), "WARNING" ) detailed_settings.append(item) + continue detailed_settings.append(detail) self.log( "Successfully retrieved full details for ITSM instance {0}/{1} " - "'{2}'.".format(idx, len(itsm_settings), instance_name), + "'{2}'.".format(idx, len(all_itsm_settings), instance_name), "DEBUG" ) self.log( "Completed ITSM detail retrieval. {0} of {1} instance(s) have full " - "connection details.".format(len(detailed_settings), len(itsm_settings)), + "connection details.".format(len(detailed_settings), len(all_itsm_settings)), "INFO" ) return detailed_settings @@ -4236,12 +4489,13 @@ def get_email_event_notifications(self, network_element, filters): def get_all_email_event_notifications(self, api_family, api_function): """ - Retrieves all email event notifications from the API. + Retrieves all email event notifications using pagination from the API. Description: - This helper method fetches email event notification configurations from - Cisco Catalyst Center. It handles different response formats and extracts - email subscription data from the API response. + This helper method makes paginated API calls to fetch all email event + notification configurations from Cisco Catalyst Center. It handles + different response formats and continues pagination until all + notifications are retrieved. Args: api_family (str): The API family identifier for email event notifications. @@ -4252,54 +4506,91 @@ def get_all_email_event_notifications(self, api_family, api_function): endpoints, event filters, and email configuration details. """ self.log( - "Retrieving all email event notifications from Catalyst Center. API family: {0}, " - "API function: {1}. Calling API to fetch email subscription configurations.".format( + "Retrieving all email event notifications with pagination. API family: {0}, " + "API function: {1}. Starting pagination loop with limit=10.".format( api_family, api_function ), "DEBUG" ) try: - response = self.dnac._exec( - family=api_family, - function=api_function, - op_modifies=False, - params={} - ) - self.log( - "Received API response for email event notifications. Response type: {0}. " - "Processing response structure to extract subscription data.".format( - type(response).__name__ - ), - "DEBUG" - ) + offset = 0 + limit = 10 + all_notifications = [] + page_count = 0 - if isinstance(response, list): - notifications = response + while True: + page_count += 1 self.log( - "API response is list format with {0} email event notification(s). " - "Returning notification list directly for processing.".format(len(response)), - "INFO" + "Fetching email event notifications page {0} with offset={1}, limit={2}. " + "Calling API to retrieve email subscription configurations.".format( + page_count, offset, limit + ), + "DEBUG" ) - elif isinstance(response, dict): - notifications = response.get("response", []) + response = self.dnac._exec( + family=api_family, + function=api_function, + op_modifies=False, + params={"offset": offset, "limit": limit} + ) self.log( - "API response is dictionary format. Extracted {0} email event notification(s) " - "from response field. Returning subscription configurations.".format( - len(notifications) + "Received API response for email event notifications page {0}. " + "Response type: {1}. Processing response structure for subscription data.".format( + page_count, type(response).__name__ ), - "INFO" + "DEBUG" ) - else: + if isinstance(response, list): + notifications = response + elif isinstance(response, dict): + notifications = response.get("response", []) + else: + notifications = [] + self.log( + "API response has unexpected format in page {0}. Response type: {1}. " + "Terminating pagination.".format(page_count, type(response).__name__), + "WARNING" + ) + break + + if not notifications: + self.log( + "No email event notifications found in page {0} response. " + "Terminating pagination loop.".format(page_count), + "DEBUG" + ) + break + + all_notifications.extend(notifications) self.log( - "API response has unexpected format. Response type: {0}. Returning empty " - "list for graceful handling.".format(type(response).__name__), - "WARNING" + "Added {0} email event notification(s) from page {1}. Total accumulated: {2}.".format( + len(notifications), page_count, len(all_notifications) + ), + "DEBUG" ) - notifications = [] - return notifications + if len(notifications) < limit: + self.log( + "Received {0} email event notification(s) in page {1}, which is less " + "than limit {2}. No more pages available. Terminating pagination.".format( + len(notifications), page_count, limit + ), + "DEBUG" + ) + break + + offset += limit + + self.log( + "Email event notification retrieval completed successfully. Total pages fetched: " + "{0}, Total email event notifications retrieved: {1}. Returning complete notification list.".format( + page_count, len(all_notifications) + ), + "INFO" + ) + return all_notifications except Exception as e: self.log( @@ -5025,8 +5316,13 @@ def yaml_config_generator(self, yaml_config_generator): ), "DEBUG" ) + header_notes = [ + "Auto-generated configuration for Events and Notifications workflow.", + "Components requested: {0}".format(", ".join(components_list)), + "File mode: {0}".format(file_mode), + ] - if self.write_dict_to_yaml(playbook_data, file_path, file_mode): + if self.write_dict_to_yaml(playbook_data, file_path, file_mode, notes=header_notes): success_message = "YAML configuration file generated successfully for module '{0}'".format(self.module_name) response_data = { diff --git a/plugins/modules/inventory_playbook_config_generator.py b/plugins/modules/inventory_playbook_config_generator.py index 59e07eca35..cd28b7bc46 100644 --- a/plugins/modules/inventory_playbook_config_generator.py +++ b/plugins/modules/inventory_playbook_config_generator.py @@ -14,86 +14,19 @@ short_description: Generate YAML playbook input for 'inventory_workflow_manager' module. description: - Generates YAML input files for C(cisco.dnac.inventory_workflow_manager). - - Supports independent component generation for device details, SDA provisioning, - interface details, and user-defined fields. - - Supports global device filters by IP, hostname, serial number, and MAC address. - - In non-auto mode, provide C(component_specific_filters.components_list) to - control which component sections are generated. + - Supports independent component generation for device details, device + provisioning, interface details, and user-defined fields. + - Supports global device filters by IP address, hostname, serial number, and + MAC address. + - If C(config) is omitted or empty, the module generates all supported + components. version_added: 6.44.0 +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params author: - Mridul Saurabh (@msaurabh) - Madhan Sankaranarayanan (@madsanka) options: - dnac_host: - description: Cisco Catalyst Center hostname or IP address. - type: str - required: true - dnac_port: - description: Cisco Catalyst Center port number. - type: str - default: "443" - required: false - dnac_username: - description: Cisco Catalyst Center username. - type: str - default: "admin" - required: false - aliases: - - user - dnac_password: - description: Cisco Catalyst Center password. - type: str - required: false - dnac_verify: - description: Verify SSL certificate for Cisco Catalyst Center. - type: bool - default: true - required: false - dnac_version: - description: Cisco Catalyst Center version. - type: str - default: "2.2.3.3" - required: false - dnac_debug: - description: Enable debug logging. - type: bool - default: false - required: false - dnac_log_level: - description: Log level for module execution. - type: str - default: "WARNING" - required: false - dnac_log_file_path: - description: Path for debug log file. - type: str - default: "dnac.log" - required: false - dnac_log_append: - description: Append to log file instead of overwriting. - type: bool - default: true - required: false - dnac_log: - description: Enable logging to file. - type: bool - default: false - required: false - validate_response_schema: - description: Validate response schema from API. - type: bool - default: true - required: false - dnac_api_task_timeout: - description: API task timeout in seconds. - type: int - default: 1200 - required: false - dnac_task_poll_interval: - description: Task poll interval in seconds. - type: int - default: 2 - required: false state: description: The desired state of Cisco Catalyst Center after module execution. type: str @@ -101,169 +34,157 @@ - gathered default: "gathered" required: false + file_path: + description: + - Path where the generated YAML configuration file will be saved. + - If not provided, the module generates a default file name using the + brownfield helper. + - The generated file name format is + C(inventory_playbook_config_.yml). + type: str + required: false + file_mode: + description: + - File write mode for the generated YAML configuration file. + - Relevant only when C(file_path) is provided. + - C(overwrite) replaces the existing file content. + - C(append) appends new YAML documents to the existing file. + - When omitted, defaults to C(overwrite). + type: str + choices: + - overwrite + - append + default: overwrite + required: false config: description: - - A list of filters for generating YAML playbook compatible with the 'inventory_workflow_manager' module. - - Filters specify which devices and credentials to include in the YAML configuration file. - - If "components_list" is specified, only those components are included, regardless of the filters. + - Dictionary of filters controlling which inventory components are + generated for C(inventory_workflow_manager). + - If omitted or empty, the module generates all supported components. + - If provided, it may contain only C(global_filters) and/or + C(component_specific_filters). + - At least one of C(global_filters) or C(component_specific_filters) must + be present when C(config) is provided. type: dict - required: true + required: false suboptions: - generate_all_configurations: - description: - - When set to True, automatically generates YAML configurations for all devices in Cisco Catalyst Center. - - This mode discovers all managed devices in Cisco Catalyst Center and extracts all device inventory configurations. - - When enabled, the config parameter becomes optional and will use default values if not provided. - - A default filename will be generated automatically if file_path is not specified. - - This is useful for complete infrastructure discovery and documentation. - - Note - Only devices with manageable software versions are included in the output. - type: bool - required: false - default: false - file_path: - description: - - Path where the YAML configuration file will be saved. - - If not provided, the file will be saved in the current working directory with - a default file name C(inventory_playbook_config_.yml). - - For example, C(inventory_playbook_config_2026-01-24_12-33-20.yml). - type: str - file_mode: - description: - - Controls how config is written to the YAML file. - - C(overwrite) replaces existing file content. - - C(append) appends generated YAML content to the existing file. - type: str - choices: ["overwrite", "append"] - default: "overwrite" global_filters: description: - - Global filters to apply when generating the YAML configuration file. - - These filters apply to all components unless overridden by component-specific filters. - - Supports filtering devices by IP address, hostname, serial number, or MAC address. + - Global device filters applied across all components during playbook + generation. + - Supports filtering by IP address, hostname, serial number, or MAC + address. + - When multiple filter types are specified, they are combined using + OR logic and a device matching any specified filter is included. + - Omitting a filter type means no restriction is applied on that + attribute. + - Evaluation order is C(ip_address_list), C(hostname_list), + C(serial_number_list), then C(mac_address_list). type: dict suboptions: ip_address_list: description: - - List of device IP addresses to include in the YAML configuration file. - - When specified, only devices with matching management IP addresses will be included. - - For example, ["192.168.1.1", "192.168.1.2", "192.168.1.3"] + - List of device management IP addresses to include in the + generated YAML configuration. + - Each value is matched against the C(managementIpAddress) field + returned by Cisco Catalyst Center. + - When multiple global filter types are specified, they are + combined using OR logic and a device matching any filter is + included. + - When omitted, no IP-based filtering is applied. + - For example, C(['10.1.1.1', '10.1.1.2']). type: list elements: str hostname_list: description: - - List of device hostnames to include in the YAML configuration file. - - When specified, only devices with matching hostnames will be included. - - For example, ["switch-1", "router-1", "firewall-1"] + - List of device hostnames to include in the generated YAML + configuration. type: list elements: str serial_number_list: description: - - List of device serial numbers to include in the YAML configuration file. - - When specified, only devices with matching serial numbers will be included. - - For example, ["ABC123456789", "DEF987654321"] + - List of device serial numbers to include in the generated YAML + configuration. type: list elements: str mac_address_list: description: - - List of device MAC addresses to include in the YAML configuration file. - - When specified, only devices with matching MAC addresses will be included. - - For example, ["e4:1f:7b:d7:bd:00", "a1:b2:c3:d4:e5:f6"] + - List of device MAC addresses to include in the generated YAML + configuration. type: list elements: str component_specific_filters: description: - - Filters to specify which components and device attributes to include in the YAML configuration file. - - If "components_list" is specified, only those components are included. - - Additional filters can be applied to narrow down device selection based on role, type, etc. + - Component-level filters controlling which sections are included in + the generated YAML configuration. + - If no component filter blocks are provided, C(components_list) is + required and must be non-empty. + - If a component filter block is provided, the corresponding component + is auto-added to C(components_list) if missing. type: dict suboptions: components_list: description: - - List of components to include in the YAML configuration file. - - Valid values are "device_details", "provision_device", "interface_details", and "user_defined_fields". - - If not specified, all components are included. + - List of components to include in the generated YAML + configuration file. + - Optional when one or more component filter blocks are provided. + - Required when no component filter blocks are provided. type: list elements: str choices: - - device_details - - provision_device - - interface_details - - user_defined_fields + - device_details + - provision_device + - interface_details + - user_defined_fields device_details: description: - - Filters for device configuration generation. - - Accepts a dict or a list of dicts. - - List behavior OR between dict entries. - - Dict behavior AND between filter keys. - - Supported keys include type, role, snmp_version, and cli_transport. - - 'Type options: NETWORK_DEVICE, COMPUTE_DEVICE, MERAKI_DASHBOARD, THIRD_PARTY_DEVICE, FIREPOWER_MANAGEMENT_SYSTEM.' - - 'Role options: ACCESS, CORE, DISTRIBUTION, BORDER ROUTER, UNKNOWN.' - - 'SNMP version options: v2, v2c, v3.' - - 'CLI transport options: ssh or telnet.' + - Filters for device detail generation. + - Accepts either a single dictionary or a list of dictionaries. + - Multiple dictionaries use OR logic and keys inside each + dictionary use AND logic. + - "Supported filter keys: C(type), C(role), C(snmp_version), + C(cli_transport)." + - "Allowed C(type) values: C(NETWORK_DEVICE), + C(COMPUTE_DEVICE), C(MERAKI_DASHBOARD), + C(THIRD_PARTY_DEVICE), + C(FIREPOWER_MANAGEMENT_SYSTEM)." + - "Allowed C(role) values: C(ACCESS), C(CORE), + C(DISTRIBUTION), C(BORDER ROUTER), C(UNKNOWN). + Accepts a string or list of strings." + - "Allowed C(snmp_version) values: C(v2), C(v2c), C(v3)." + - "Allowed C(cli_transport) values: C(ssh), C(telnet)." + - When omitted, no device-attribute filtering is applied. type: raw - suboptions: - role: - description: - - Filter devices by network role. - - Can be a single role string or a list of roles (matches any in the list). - - Valid values are ACCESS, CORE, DISTRIBUTION, BORDER ROUTER, UNKNOWN. - - 'Example: role="ACCESS" for single role or role=["ACCESS", "CORE"] for multiple roles.' - type: str - choices: - - ACCESS - - CORE - - DISTRIBUTION - - BORDER ROUTER - - UNKNOWN provision_device: description: - - Specific filters for provision_device component. - - Filters the provision_wired_device configuration based on site assignment. - - No additional API calls are made; filtering is applied to existing provision data. + - Filters the provision device output by site name. type: dict suboptions: site_name: description: - - Filter provision devices by site name (e.g., Global/India/Telangana/Hyderabad/BLD_1). + - Hierarchical site name used to filter provisioned devices. type: str interface_details: description: - - Component selector for auto-generated interface_details. - - Filters interface configurations based on device IP addresses and interface names. - - Interfaces are automatically discovered from matched devices using Catalyst Center API. + - Filters interface details by interface name. type: dict suboptions: interface_name: description: - - Filter interfaces by name (optional). - - Can be a single interface name string or a list of interface names. - - When specified, only interfaces with matching names will be included. - - Matches use 'OR' logic; any interface matching any name in the list is included. - - Common interface names include Vlan100, Loopback0, GigabitEthernet1/0/1, or FortyGigabitEthernet1/1/1. - - If not specified, all discovered interfaces for matched devices are included. - - 'Example: interface_name="Vlan100" for single or interface_name=["Vlan100", "Loopback0"] for multiple.' - type: str + - Single interface name or list of interface names to include. + type: raw user_defined_fields: description: - - Filters for user-defined fields (UDF) component generation. - - Supports filtering by UDF field name and/or UDF field value. - - Both C(name) and C(value) accept a single string or a list of strings. - - List behavior uses OR logic (match any item in the list). + - Filters user-defined field output by field name and/or value. type: dict suboptions: name: description: - - Filter UDF output by field name. - - Accepts a single name string or a list of names. - - When specified, only matching UDF names are included. - - 'Example: name="Cisco Switches" or name=["Cisco Switches", "To_test_udf"].' + - Single field name or list of field names to include. type: raw value: description: - - Filter UDF output by field value. - - Accepts a single value string or a list of values. - - When specified, only UDFs with matching values are included. - - 'Example: value="2234" or value=["2234", "value12345"].' + - Single field value or list of field values to include. type: raw @@ -294,7 +215,7 @@ """ EXAMPLES = r""" -- name: Generate inventory playbook for all devices +- name: Generate inventory playbook with no config (generate all) cisco.dnac.inventory_playbook_config_generator: dnac_host: "{{ dnac_host }}" dnac_port: "{{ dnac_port }}" @@ -304,10 +225,8 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered - config: - generate_all_configurations: true - file_mode: "overwrite" - file_path: "./inventory_devices_all.yml" + # file_path omitted - module auto-generates a timestamped filename + # config omitted - generates all supported components - name: Generate inventory playbook for specific devices by IP address cisco.dnac.inventory_playbook_config_generator: @@ -319,15 +238,20 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered + file_path: "./inventory_devices_by_ip.yml" + file_mode: "overwrite" config: global_filters: ip_address_list: - "10.195.225.40" - "10.195.225.42" - file_mode: "overwrite" - file_path: "./inventory_devices_by_ip.yml" + component_specific_filters: + components_list: + - device_details + - provision_device + - interface_details -- name: Generate inventory playbook for devices by hostname +- name: Generate inventory playbook filtered by hostname cisco.dnac.inventory_playbook_config_generator: dnac_host: "{{ dnac_host }}" dnac_port: "{{ dnac_port }}" @@ -337,16 +261,17 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered + file_path: "./inventory_hostname_filter.yml" config: global_filters: hostname_list: - - "cat9k_1" - - "cat9k_2" - - "switch_1" - file_mode: "overwrite" - file_path: "./inventory_devices_by_hostname.yml" + - "switch-floor1.example.com" + - "switch-floor2.example.com" + component_specific_filters: + components_list: + - device_details -- name: Generate inventory playbook for devices by serial number +- name: Generate inventory playbook filtered by serial number cisco.dnac.inventory_playbook_config_generator: dnac_host: "{{ dnac_host }}" dnac_port: "{{ dnac_port }}" @@ -356,15 +281,17 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered + file_path: "./inventory_serial_filter.yml" config: global_filters: serial_number_list: - - "FCW2147L0AR1" - - "FCW2147L0AR2" - file_mode: "overwrite" - file_path: "./inventory_devices_by_serial.yml" + - "FJC2327U0S2" + component_specific_filters: + components_list: + - device_details + - provision_device -- name: Generate inventory playbook for mixed device filtering +- name: Generate inventory with combined global filters (OR logic) cisco.dnac.inventory_playbook_config_generator: dnac_host: "{{ dnac_host }}" dnac_port: "{{ dnac_port }}" @@ -374,52 +301,20 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered + file_path: "./inventory_combined_filters.yml" config: global_filters: + # OR logic - device matching ANY filter is included ip_address_list: - "10.195.225.40" hostname_list: - - "cat9k_1" - file_mode: "overwrite" - file_path: "./inventory_devices_mixed_filter.yml" - -- name: Generate inventory playbook with default file path - cisco.dnac.inventory_playbook_config_generator: - dnac_host: "{{ dnac_host }}" - dnac_port: "{{ dnac_port }}" - dnac_username: "{{ dnac_username }}" - dnac_password: "{{ dnac_password }}" - dnac_verify: "{{ dnac_verify }}" - dnac_version: "{{ dnac_version }}" - dnac_debug: "{{ dnac_debug }}" - state: gathered - config: - global_filters: - ip_address_list: - - "10.195.225.40" - file_mode: "overwrite" - -- name: Generate inventory playbook for multiple devices - cisco.dnac.inventory_playbook_config_generator: - dnac_host: "{{ dnac_host }}" - dnac_port: "{{ dnac_port }}" - dnac_username: "{{ dnac_username }}" - dnac_password: "{{ dnac_password }}" - dnac_verify: "{{ dnac_verify }}" - dnac_version: "{{ dnac_version }}" - dnac_debug: "{{ dnac_debug }}" - state: gathered - config: - global_filters: - ip_address_list: - - "10.195.225.40" - - "10.195.225.41" - - "10.195.225.42" - - "10.195.225.43" - file_mode: "overwrite" - file_path: "./inventory_devices_multiple.yml" + - "switch-floor2.example.com" + component_specific_filters: + components_list: + - device_details + - interface_details -- name: Generate inventory playbook for ACCESS role devices only +- name: Generate inventory with AND device_details filters cisco.dnac.inventory_playbook_config_generator: dnac_host: "{{ dnac_host }}" dnac_port: "{{ dnac_port }}" @@ -429,15 +324,16 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered + file_path: "./inventory_and_filter.yml" + file_mode: "overwrite" config: component_specific_filters: - components_list: ["device_details"] device_details: - - role: "ACCESS" - file_mode: "overwrite" - file_path: "./inventory_access_role_devices.yml" + # AND filter - device must match BOTH role AND type + role: "ACCESS" + type: "NETWORK_DEVICE" -- name: Generate inventory playbook with auto-populated provision_wired_device +- name: Generate inventory playbook for ACCESS role devices cisco.dnac.inventory_playbook_config_generator: dnac_host: "{{ dnac_host }}" dnac_port: "{{ dnac_port }}" @@ -447,10 +343,12 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered + file_path: "./inventory_access_role_devices.yml" + file_mode: "overwrite" config: - generate_all_configurations: true - file_mode: "overwrite" - file_path: "./inventory_with_provisioning.yml" + component_specific_filters: + device_details: + role: "ACCESS" - name: Generate inventory playbook with interface filtering cisco.dnac.inventory_playbook_config_generator: @@ -462,6 +360,8 @@ dnac_version: "{{ dnac_version }}" dnac_debug: "{{ dnac_debug }}" state: gathered + file_path: "./inventory_interface_filtered.yml" + file_mode: "overwrite" config: global_filters: ip_address_list: @@ -472,53 +372,8 @@ interface_name: - "Vlan100" - "GigabitEthernet1/0/1" - file_mode: "overwrite" - file_path: "./inventory_interface_filtered.yml" - -- name: Generate inventory playbook for specific interface on single device - cisco.dnac.inventory_playbook_config_generator: - dnac_host: "{{ dnac_host }}" - dnac_port: "{{ dnac_port }}" - dnac_username: "{{ dnac_username }}" - dnac_password: "{{ dnac_password }}" - dnac_verify: "{{ dnac_verify }}" - dnac_version: "{{ dnac_version }}" - dnac_debug: "{{ dnac_debug }}" - state: gathered - config: - global_filters: - ip_address_list: - - "10.195.225.40" - component_specific_filters: - interface_details: - interface_name: "Loopback0" - file_mode: "overwrite" - file_path: "./inventory_loopback_interface.yml" - -- name: Generate complete inventory with all components and interface filter - cisco.dnac.inventory_playbook_config_generator: - dnac_host: "{{ dnac_host }}" - dnac_port: "{{ dnac_port }}" - dnac_username: "{{ dnac_username }}" - dnac_password: "{{ dnac_password }}" - dnac_verify: "{{ dnac_verify }}" - dnac_version: "{{ dnac_version }}" - dnac_debug: "{{ dnac_debug }}" - state: gathered - config: - component_specific_filters: - components_list: ["device_details", "provision_device", "interface_details"] - device_details: - role: "ACCESS" - interface_details: - interface_name: - - "GigabitEthernet1/0/1" - - "GigabitEthernet1/0/2" - - "GigabitEthernet1/0/3" - file_mode: "overwrite" - file_path: "./inventory_access_with_interfaces.yml" -- name: Generate UDF output filtered by name (single string) +- name: Generate inventory playbook with filtered user-defined fields cisco.dnac.inventory_playbook_config_generator: dnac_host: "{{ dnac_host }}" dnac_port: "{{ dnac_port }}" @@ -530,65 +385,9 @@ state: gathered config: component_specific_filters: - components_list: ["user_defined_fields"] user_defined_fields: name: "Cisco Switches" - file_mode: "overwrite" - file_path: "./inventory_udf_name_single.yml" - -- name: Generate UDF output filtered by name (list) - cisco.dnac.inventory_playbook_config_generator: - dnac_host: "{{ dnac_host }}" - dnac_port: "{{ dnac_port }}" - dnac_username: "{{ dnac_username }}" - dnac_password: "{{ dnac_password }}" - dnac_verify: "{{ dnac_verify }}" - dnac_version: "{{ dnac_version }}" - dnac_debug: "{{ dnac_debug }}" - state: gathered - config: - component_specific_filters: - components_list: ["user_defined_fields"] - user_defined_fields: - name: ["Cisco Switches", "To_test_udf"] - file_mode: "overwrite" - file_path: "./inventory_udf_name_list.yml" - -- name: Generate UDF output filtered by value (single string) - cisco.dnac.inventory_playbook_config_generator: - dnac_host: "{{ dnac_host }}" - dnac_port: "{{ dnac_port }}" - dnac_username: "{{ dnac_username }}" - dnac_password: "{{ dnac_password }}" - dnac_verify: "{{ dnac_verify }}" - dnac_version: "{{ dnac_version }}" - dnac_debug: "{{ dnac_debug }}" - state: gathered - config: - component_specific_filters: - components_list: ["user_defined_fields"] - user_defined_fields: - value: "2234" - file_mode: "overwrite" - file_path: "./inventory_udf_value_single.yml" - -- name: Generate UDF output filtered by value (list) - cisco.dnac.inventory_playbook_config_generator: - dnac_host: "{{ dnac_host }}" - dnac_port: "{{ dnac_port }}" - dnac_username: "{{ dnac_username }}" - dnac_password: "{{ dnac_password }}" - dnac_verify: "{{ dnac_verify }}" - dnac_version: "{{ dnac_version }}" - dnac_debug: "{{ dnac_debug }}" - state: gathered - config: - component_specific_filters: - components_list: ["user_defined_fields"] - user_defined_fields: - value: ["2234", "value12345", "value321"] - file_mode: "overwrite" - file_path: "./inventory_udf_value_list.yml" + value: ["2234", "value12345"] """ RETURN = r""" # Case_1: Success Scenario @@ -673,7 +472,17 @@ def __init__(self, module): def validate_input(self): """ - Validates the input configuration parameters for the playbook. + Validate and normalize module input. + + Behavior: + - If C(config) is omitted or empty, generates all supported + components + - Uses top-level C(file_path) and C(file_mode) + - If C(config) is provided, it may contain only C(global_filters) + and/or C(component_specific_filters) + - Auto-adds components to C(components_list) when component filter + blocks are provided + Returns: object: An instance of the class with updated attributes: self.msg: A message describing the validation result. @@ -681,55 +490,458 @@ def validate_input(self): self.validated_config: If successful, a validated version of the "config" parameter. """ self.log("Starting validation of input configuration parameters.", "DEBUG") + normalized_config = self.config + + file_path = self.params.get("file_path") + file_mode = self.params.get("file_mode", "overwrite") + if file_mode not in ("overwrite", "append"): + self.msg = ( + "Invalid value for 'file_mode': '{0}'. " + "Allowed values are: ['overwrite', 'append'].".format(file_mode) + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self - # Check if configuration is available - if not self.config: - self.status = "success" - self.msg = "Configuration is not available in the playbook for validation" - self.log(self.msg, "ERROR") + if not file_path and file_mode != "overwrite": + self.log( + "file_mode='{0}' is ignored because file_path is not provided.".format( + file_mode + ), + "WARNING", + ) + + if not normalized_config: + self.validated_config = {"generate_all_configurations": True} + self.msg = "No config provided. Generating all supported components." + self.set_operation_result("success", False, self.msg, "INFO") return self - # Expected schema for configuration parameters + allowed_keys = { + "global_filters", + "component_specific_filters", + } + self.validate_invalid_params(normalized_config, allowed_keys) + temp_spec = { - "generate_all_configurations": { - "type": "bool", - "required": False, - "default": False - }, - "file_mode": { - "type": "str", - "required": False, - "default": "overwrite", - "choices": ["overwrite", "append"] - }, - "file_path": { - "type": "str", - "required": False - }, "component_specific_filters": { "type": "dict", - "required": False + "required": False, }, "global_filters": { "type": "dict", - "required": False}, + "required": False, + }, } - # Validate params - self.log("Validating configuration against schema.", "DEBUG") - valid_temp = self.validate_config_dict(self.config, temp_spec) + validated_config = self.validate_config_dict(normalized_config, temp_spec) + if not isinstance(validated_config, dict): + validated_config = dict(normalized_config) + else: + validated_config = { + key: value for key, value in validated_config.items() if value is not None + } + + global_filters = validated_config.get("global_filters") + component_filters = validated_config.get("component_specific_filters") + + if global_filters is None and component_filters is None: + self.msg = ( + "Validation Error: when 'config' is provided, at least one of " + "'global_filters' or 'component_specific_filters' must be present." + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self - self.log("Validating minimum requirements against provided config: {0}".format(self.config), "DEBUG") - self.validate_minimum_requirements(self.config) + if component_filters is not None: + validation_error = self._validate_inventory_component_specific_filters( + component_filters + ) + if validation_error: + self.msg = validation_error + self.set_operation_result("failed", False, self.msg, "ERROR") + return self - # Set the validated configuration and update the result with success status - self.validated_config = valid_temp - self.msg = "Successfully validated playbook configuration parameters using 'validated_input': {0}".format( - str(valid_temp) + self.validated_config = validated_config + self.msg = ( + "Successfully validated playbook configuration parameters using " + "'validated_input': {0}".format(str(validated_config)) ) self.set_operation_result("success", False, self.msg, "INFO") return self + def _validate_inventory_string_or_list(self, value, field_name): + """ + Validate and auto-populate component-specific filters. + + Checks key validity, validates each component block via + _validate_inventory_component_block, and auto-adds + components to components_list when filter blocks are + provided. + + Args: + component_filters (dict): The + 'component_specific_filters' dictionary from + the user's config input. + + Returns: + str | None: Error message string if validation + fails, None on success. + """ + if isinstance(value, str): + return None + + if isinstance(value, list) and all(isinstance(item, str) for item in value): + return None + + return ( + "'{0}' must be a string or a list of strings, got: {1}.".format( + field_name, type(value).__name__ + ) + ) + + def _validate_inventory_component_block(self, component_name, component_value): + """ + Validate a single component filter block for inventory generation. + + For 'device_details', accepts a dictionary or list of dictionaries. + Keys inside each dictionary use AND logic and multiple dictionaries use + OR logic. For other components, accepts a dictionary only. + + Args: + component_name (str): One of 'device_details', + 'provision_device', 'interface_details', or + 'user_defined_fields'. + component_value (dict | list): The filter block to validate. + + Returns: + str | None: Error message if validation fails, otherwise None. + """ + if component_name == "device_details": + if isinstance(component_value, dict): + filter_items = [component_value] + + elif isinstance(component_value, list): + filter_items = component_value + + else: + return ( + "'device_details' must be a dictionary or list of dictionaries, " + "got: {0}.".format(type(component_value).__name__) + ) + + allowed_filter_keys = {"type", "role", "snmp_version", "cli_transport"} + allowed_types = { + "NETWORK_DEVICE", + "COMPUTE_DEVICE", + "MERAKI_DASHBOARD", + "THIRD_PARTY_DEVICE", + "FIREPOWER_MANAGEMENT_SYSTEM", + } + allowed_roles = { + "ACCESS", + "CORE", + "DISTRIBUTION", + "BORDER ROUTER", + "UNKNOWN", + } + allowed_snmp_versions = {"v2", "v2c", "v3"} + allowed_cli_transports = {"ssh", "telnet", "SSH", "TELNET"} + + for index, filter_item in enumerate(filter_items, start=1): + if not isinstance(filter_item, dict): + return ( + "Each entry in 'device_details' must be a dictionary, but entry " + "{0} is of type: {1}.".format( + index, type(filter_item).__name__ + ) + ) + + invalid_keys = set(filter_item.keys()) - allowed_filter_keys + if invalid_keys: + return ( + "Invalid parameters found in 'device_details' filter entry {0}: " + "{1}. Allowed parameters are: {2}.".format( + index, + sorted(list(invalid_keys)), + sorted(list(allowed_filter_keys)), + ) + ) + + device_type = filter_item.get("type") + if device_type is not None: + if not isinstance(device_type, str) or device_type not in allowed_types: + return ( + "Invalid 'type' value '{0}' in 'device_details'. " + "Allowed values are: {1}.".format( + device_type, sorted(list(allowed_types)) + ) + ) + + role = filter_item.get("role") + if role is not None: + if isinstance(role, str): + role_values = [role] + filter_item["role"] = role_values + + elif isinstance(role, list): + role_values = role + + else: + return ( + "'device_details.role' must be a string or list of strings, " + "got: {0}.".format(type(role).__name__) + ) + + invalid_roles = [ + role_item + for role_item in role_values + if not isinstance(role_item, str) or role_item not in allowed_roles + ] + if invalid_roles: + return ( + "Invalid role value(s) in 'device_details.role': {0}. " + "Allowed values are: {1}.".format( + invalid_roles, sorted(list(allowed_roles)) + ) + ) + + snmp_version = filter_item.get("snmp_version") + if snmp_version is not None: + if ( + not isinstance(snmp_version, str) + or snmp_version not in allowed_snmp_versions + ): + return ( + "Invalid 'snmp_version' value '{0}' in 'device_details'. " + "Allowed values are: {1}.".format( + snmp_version, sorted(list(allowed_snmp_versions)) + ) + ) + + cli_transport = filter_item.get("cli_transport") + if cli_transport is not None: + if ( + not isinstance(cli_transport, str) + or cli_transport not in allowed_cli_transports + ): + return ( + "Invalid 'cli_transport' value '{0}' in 'device_details'. " + "Allowed values are: {1}.".format( + cli_transport, sorted(list(allowed_cli_transports)) + ) + ) + return None + + if not isinstance(component_value, dict): + return ( + "'{0}' must be a dictionary, got: {1}.".format( + component_name, type(component_value).__name__ + ) + ) + + if component_name == "provision_device": + invalid_keys = set(component_value.keys()) - {"site_name"} + if invalid_keys: + return ( + "Invalid parameters found in 'provision_device': {0}. " + "Allowed parameters are: ['site_name'].".format( + sorted(list(invalid_keys)) + ) + ) + + site_name = component_value.get("site_name") + if site_name is not None and not isinstance(site_name, str): + self.log( + "Validation failed: 'provision_device.site_name' must be " + "a string, got '{0}'.".format(type(site_name).__name__), + "WARNING", + ) + return "'provision_device.site_name' must be a string." + + return None + + if component_name == "interface_details": + invalid_keys = set(component_value.keys()) - {"interface_name"} + if invalid_keys: + return ( + "Invalid parameters found in 'interface_details': {0}. " + "Allowed parameters are: ['interface_name'].".format( + sorted(list(invalid_keys)) + ) + ) + interface_name = component_value.get("interface_name") + if interface_name is not None: + validation_error = self._validate_inventory_string_or_list( + interface_name, "interface_details.interface_name" + ) + if validation_error: + return validation_error + if isinstance(interface_name, str): + component_value["interface_name"] = [interface_name] + return None + + if component_name == "user_defined_fields": + invalid_keys = set(component_value.keys()) - {"name", "value"} + if invalid_keys: + return ( + "Invalid parameters found in 'user_defined_fields': {0}. " + "Allowed parameters are: ['name', 'value'].".format( + sorted(list(invalid_keys)) + ) + ) + + for field_name in ("name", "value"): + field_value = component_value.get(field_name) + if field_value is not None: + validation_error = self._validate_inventory_string_or_list( + field_value, "user_defined_fields.{0}".format(field_name) + ) + if validation_error: + return validation_error + if isinstance(field_value, str): + component_value[field_name] = [field_value] + + return None + + return None + + def _validate_inventory_component_specific_filters(self, component_filters): + """ + Validate and auto-populate component-specific filters. + + Checks key validity, validates each component block via + _validate_inventory_component_block(), and auto-adds components to + 'components_list' when filter blocks are provided. + + Args: + component_filters (dict): The 'component_specific_filters' + dictionary from the user configuration. + + Returns: + str | None: Error message if validation fails, otherwise None. + """ + if not isinstance(component_filters, dict): + return ( + "'component_specific_filters' must be a dictionary, got: {0}.".format( + type(component_filters).__name__ + ) + ) + + if "component_list" in component_filters: + return "Invalid key 'component_list' under component_specific_filters. Use 'components_list'." + + allowed_component_filter_keys = { + "components_list", + "device_details", + "provision_device", + "interface_details", + "user_defined_fields", + } + invalid_filter_keys = set(component_filters.keys()) - allowed_component_filter_keys + if invalid_filter_keys: + return ( + "Invalid keys found in 'component_specific_filters': {0}. " + "Allowed keys are: {1}.".format( + sorted(list(invalid_filter_keys)), + sorted(list(allowed_component_filter_keys)), + ) + ) + + components_list = component_filters.get("components_list") + normalized_components_list = [] + allowed_components = { + "device_details", + "provision_device", + "interface_details", + "user_defined_fields", + } + + if components_list is not None: + if not isinstance(components_list, list): + return ( + "'components_list' must be a list, got: {0}.".format( + type(components_list).__name__ + ) + ) + + invalid_components = [ + component + for component in components_list + if component not in allowed_components + ] + if invalid_components: + return ( + "Invalid component names found in 'components_list': {0}. " + "Allowed values are: {1}.".format( + sorted(list(set(invalid_components))), + sorted(list(allowed_components)), + ) + ) + + seen_components = set() + duplicate_components = [] + for component_name in components_list: + if component_name in seen_components and component_name not in duplicate_components: + duplicate_components.append(component_name) + seen_components.add(component_name) + normalized_components_list.append(component_name) + + if duplicate_components: + return ( + "Duplicate component names found in 'components_list': {0}. " + "Each component may be specified only once.".format( + duplicate_components + ) + ) + + component_blocks = [] + component_names_to_check = ( + "device_details", + "provision_device", + "interface_details", + "user_defined_fields", + ) + total_component_names = len(component_names_to_check) + for idx, component_name in enumerate(component_names_to_check, start=1): + if component_name not in component_filters: + self.log( + "Processing component {0}/{1}: '{2}' is not present in " + "component_specific_filters. Continuing.".format( + idx, total_component_names, component_name + ), + "DEBUG", + ) + continue + + self.log( + "Processing component {0}/{1}: validating filter block for " + "'{2}'.".format(idx, total_component_names, component_name), + "DEBUG", + ) + validation_error = self._validate_inventory_component_block( + component_name, component_filters.get(component_name) + ) + if validation_error: + return validation_error + + component_blocks.append(component_name) + + if component_blocks: + for component_name in component_blocks: + if component_name not in normalized_components_list: + normalized_components_list.append(component_name) + component_filters["components_list"] = normalized_components_list + elif not normalized_components_list: + return ( + "Validation Error: 'components_list' is mandatory and must be " + "non-empty when no component filter blocks are provided under " + "'component_specific_filters'." + ) + else: + component_filters["components_list"] = normalized_components_list + + return None + def get_workflow_filters_schema(self): """ Description: Returns the schema for workflow filters supported by the module. @@ -741,29 +953,76 @@ def get_workflow_filters_schema(self): schema = { "network_elements": { "device_details": { - "filters": ["ip_address", "hostname", "serial_number", "role"], + "filters": { + "type": { + "type": "str", + "required": False, + "choices": [ + "NETWORK_DEVICE", + "COMPUTE_DEVICE", + "MERAKI_DASHBOARD", + "THIRD_PARTY_DEVICE", + "FIREPOWER_MANAGEMENT_SYSTEM", + ], + }, + "role": { + "type": "list", + "required": False, + "elements": "str", + "choices": [ + "ACCESS", + "CORE", + "DISTRIBUTION", + "BORDER ROUTER", + "UNKNOWN", + ], + }, + "snmp_version": { + "type": "str", + "required": False, + "choices": ["v2", "v2c", "v3"], + }, + "cli_transport": { + "type": "str", + "required": False, + "choices": ["ssh", "telnet", "SSH", "TELNET"], + }, + }, "api_function": "get_device_list", "api_family": "devices", "reverse_mapping_function": self.inventory_get_device_reverse_mapping, "get_function_name": self.get_device_details_details, }, "provision_device": { - "filters": ["site_name"], + "filters": { + "site_name": { + "type": "str", + "required": False, + }, + }, "is_filter_only": True, }, "interface_details": { - "filters": ["interface_name"], + "filters": { + "interface_name": { + "type": "list", + "required": False, + "elements": "str", + }, + }, "is_filter_only": True, }, "user_defined_fields": { "filters": { "name": { - "type": ["str", "list"], + "type": "list", "required": False, + "elements": "str", }, "value": { - "type": ["str", "list"], + "type": "list", "required": False, + "elements": "str", }, }, "api_function": "get_device_list", @@ -2907,7 +3166,8 @@ def yaml_config_generator(self, yaml_config_generator): and writes the YAML content to a specified file. It dynamically handles multiple network elements and their respective filters. Args: - yaml_config_generator (dict): Contains file_path, global_filters, and component_specific_filters. + yaml_config_generator (dict): Contains generate_all_configurations, + global_filters, and component_specific_filters. Returns: self: The current instance with the operation result and message updated. @@ -2934,7 +3194,7 @@ def yaml_config_generator(self, yaml_config_generator): yaml_config_generator.get("generate_all_configurations", False) ) - file_path = yaml_config_generator.get("file_path") or self.generate_filename() + file_path = self.params.get("file_path") or self.generate_filename() self.log("YAML output file path resolved: {0}".format(file_path), "DEBUG") module_supported_network_elements = self.module_schema.get("network_elements", {}) @@ -3270,7 +3530,9 @@ def yaml_config_generator(self, yaml_config_generator): self.set_operation_result("success", False, self.msg, "WARNING") return self - file_mode = yaml_config_generator.get("file_mode", "overwrite") + file_mode = self.params.get("file_mode", "overwrite") + if not self.params.get("file_path"): + file_mode = "overwrite" self.log( "YAML configuration file path determined: {0}, file_mode: {1}".format(file_path, file_mode), @@ -3401,7 +3663,7 @@ def write_dicts_to_yaml(self, dicts_list, file_path, file_mode, dumper=None): final_yaml = header_comments + "\n---\n" + "\n---\n".join(serialized_documents) + "\n" self.ensure_directory_exists(file_path) - with open(file_path, "w", encoding="utf-8") as yaml_file: + with open(file_path, open_mode, encoding="utf-8") as yaml_file: yaml_file.write(final_yaml) self.log("YAML documents written successfully to {0}.".format(file_path), "INFO") @@ -3955,7 +4217,14 @@ def main(): "validate_response_schema": {"type": "bool", "default": True}, "dnac_api_task_timeout": {"type": "int", "default": 1200}, "dnac_task_poll_interval": {"type": "int", "default": 2}, - "config": {"required": True, "type": "dict"}, + "file_path": {"type": "str", "required": False}, + "file_mode": { + "type": "str", + "required": False, + "default": "overwrite", + "choices": ["overwrite", "append"], + }, + "config": {"required": False, "type": "dict"}, "state": {"default": "gathered", "choices": ["gathered"]}, } diff --git a/plugins/modules/network_profile_switching_playbook_config_generator.py b/plugins/modules/network_profile_switching_playbook_config_generator.py index de3cf9b191..7fb86bc5ca 100644 --- a/plugins/modules/network_profile_switching_playbook_config_generator.py +++ b/plugins/modules/network_profile_switching_playbook_config_generator.py @@ -398,11 +398,6 @@ def validate_input(self): # Expected schema for configuration parameters # Define expected schema for configuration parameters temp_spec = { - "generate_all_configurations": { - "type": "bool", - "required": False, - "default": False - }, "global_filters": { "type": "dict", "required": False @@ -413,15 +408,6 @@ def validate_input(self): valid_temp = self.validate_config_dict(self.config, temp_spec) self.validate_invalid_params(self.config, set(temp_spec.keys())) - if valid_temp.get("generate_all_configurations"): - self.msg = ( - "generate_all_configurations cannot be used when config is provided. " - "Omit config to generate all switch profile configurations." - ) - self.log(self.msg, "ERROR") - self.set_operation_result("failed", False, self.msg, "ERROR") - return self - if not valid_temp.get("global_filters"): self.msg = ( "Validation failed: global_filters is required when config is provided." @@ -457,8 +443,9 @@ def validate_input(self): if not provided_filters: self.msg = ( - "global_filters provided but no valid filter lists have values. " - "At least one of {0} must contain values.".format(valid_filter_keys) + f"Invalid filter key '{', '.join(global_filters.keys())}' in global_filters " + f"or no filter values for {', '.join(global_filters.keys())} provided. " + f"Supported keys are: {', '.join(valid_filter_keys)}" ) self.log(self.msg, "ERROR") self.set_operation_result("failed", False, self.msg, "ERROR") @@ -476,21 +463,20 @@ def validate_input(self): self.set_operation_result("failed", False, self.msg, "ERROR") return self + valid_temp["global_filters"][filter_key] = list(dict.fromkeys(filter_value)) + # Set validated configuration and return success self.validated_config = valid_temp self.msg = ( - "Successfully validated configuration for network profile switching playbook " - "generation. Validated configuration: {0}".format(str(valid_temp)) + f"Successfully validated configuration for network profile switching playbook " + f"generation. Validated configuration: {str(valid_temp)}" ) self.log( - "Input validation completed successfully. generate_all: {0}, " - "has_global_filters: {1}, file_mode: {2}".format( - bool(valid_temp.get("generate_all_configurations")), - bool(valid_temp.get("global_filters")), - self.params.get("file_mode", "overwrite") - ), + "Input validation completed successfully. " + f"has_global_filters: {bool(valid_temp.get('global_filters'))}, " + f"file_mode: {self.params.get('file_mode', 'overwrite')}", "INFO" ) @@ -1821,11 +1807,10 @@ def yaml_config_generator(self, yaml_config_generator): "WARNING" ) self.msg = ( - "No configurations or components to process for module '{0}'. Verify input " - "filters (global_filters) or configuration (generate_all_configurations). " - "Check that switch profiles exist in Catalyst Center and match filter criteria.".format( - self.module_name - ) + f"No configurations to process for the module '{self.module_name}'. Verify input " + "of (global_filters) to ensure it matches existing switch profiles in Catalyst Center. " + "If filters are correct, " + "check that switch profiles exist in Catalyst Center and match filter criteria." ) self.set_operation_result("success", False, self.msg, "INFO") return self diff --git a/plugins/modules/network_profile_wireless_playbook_config_generator.py b/plugins/modules/network_profile_wireless_playbook_config_generator.py index 3851418456..c4f78717aa 100644 --- a/plugins/modules/network_profile_wireless_playbook_config_generator.py +++ b/plugins/modules/network_profile_wireless_playbook_config_generator.py @@ -69,7 +69,7 @@ configuration file. - If C(config) is provided, C(global_filters) is mandatory. - If C(config) is omitted, internal auto-discovery mode is used - and generate_all_configurations defaults to C(True). + and generate all configurations. type: dict required: false suboptions: @@ -77,24 +77,17 @@ description: - Global filters to apply when generating the YAML configuration file. - - These filters apply to all components unless overridden - by component-specific filters. + - These filters apply to all global filters unless overridden + by generate all configuration. - At least one filter type must be specified to identify target devices. - - Filter priority (highest to lowest) is profile_name_list, - day_n_template_list, site_list, ssid_list, ap_zone_list, - feature_template_list, additional_interface_list. - - Only the highest priority filter with valid data will - be processed. type: dict required: false suboptions: profile_name_list: description: - List of wireless profile names to extract - configurations from. - - HIGHEST PRIORITY - Used first if provided with - valid data. + configurations from and include in the generated YAML file. - Wireless Profile names must match those registered in Catalyst Center. - Case-sensitive and must be exact matches. @@ -108,8 +101,6 @@ day_n_template_list: description: - List of Day-N templates to filter wireless profiles. - - MEDIUM-HIGH PRIORITY - Only used if profile_name_list - is not provided. - Retrieves all wireless profiles containing any of the specified templates. - Case-sensitive and must be exact matches. @@ -123,9 +114,6 @@ site_list: description: - List of site hierarchies to filter wireless profiles. - - MEDIUM PRIORITY - Only used if neither - profile_name_list nor day_n_template_list are - provided. - Retrieves all wireless profiles assigned to any of the specified sites. - Case-sensitive and must be exact matches. @@ -139,8 +127,6 @@ ssid_list: description: - List of SSIDs to filter wireless profiles. - - MEDIUM-LOW PRIORITY - Only used if profile_name_list, - day_n_template_list, and site_list are not provided. - Retrieves all wireless profiles containing any of the specified SSIDs. - Case-sensitive and must be exact matches. @@ -151,8 +137,6 @@ ap_zone_list: description: - List of AP zones to filter wireless profiles. - - LOW PRIORITY - Only used if higher priority filters - are not provided. - Retrieves all wireless profiles containing any of the specified AP zones. - Case-sensitive and must be exact matches. @@ -163,8 +147,6 @@ feature_template_list: description: - List of feature templates to filter wireless profiles. - - LOWER PRIORITY - Only used if higher priority filters - are not provided. - Retrieves all wireless profiles containing any of the specified feature templates. - Case-sensitive and must be exact matches. @@ -176,8 +158,6 @@ additional_interface_list: description: - List of additional interfaces to filter wireless profiles. - - LOWEST PRIORITY - Only used if all other filters - are not provided. - Retrieves all wireless profiles containing any of the specified additional interfaces. - Case-sensitive and must be exact matches. @@ -477,9 +457,6 @@ def __init__(self, module): self.log("Initialized NetworkProfileWirelessPlaybookGenerator class instance.", "DEBUG") self.log(self.module_schema, "DEBUG") - # Initialize generate_all_configurations as class-level parameter - self.generate_all_configurations = False - def validate_input(self): """ This function performs comprehensive validation of input configuration parameters @@ -506,7 +483,7 @@ def validate_input(self): self.config = {"generate_all_configurations": True} self.validated_config = self.config self.msg = ( - "Config not provided. Defaulting to generate_all_configurations=True " + "Config not provided. Defaulting to generate all configurations " "for complete wireless profile discovery." ) self.log(self.msg, "INFO") @@ -523,11 +500,6 @@ def validate_input(self): return self temp_spec = { - "generate_all_configurations": { - "type": "bool", - "required": False, - "default": False - }, "global_filters": { "type": "dict", "required": False @@ -537,16 +509,7 @@ def validate_input(self): valid_temp = self.validate_config_dict(self.config, temp_spec) self.validate_invalid_params(self.config, set(temp_spec.keys())) - if valid_temp.get("generate_all_configurations"): - self.msg = ( - "generate_all_configurations cannot be used when config is provided. " - "Omit config to generate all wireless profile configurations." - ) - self.log(self.msg, "ERROR") - self.set_operation_result("failed", False, self.msg, "ERROR") - return self - - if not valid_temp.get("global_filters"): + if not valid_temp.get("global_filters", False): self.msg = ( "Validation failed: global_filters is required when config is provided." ) @@ -584,21 +547,38 @@ def validate_input(self): self.set_operation_result("failed", False, self.msg, "ERROR") return self + for filter_index, filter_key in enumerate(filter_keys, start=1): + self.log( + f"Validating filter {filter_index}/{len(filter_keys)}: " + f"'{filter_key}'. Checking type and removing duplicates.", + "DEBUG" + ) + + filter_value = global_filters.get(filter_key) + if not isinstance(filter_value, list) or not filter_value: + self.msg = ( + f"Filter '{filter_key}' must be a non-empty list of strings. " + f"Invalid value: {filter_value}. Please provide valid filter values." + ) + self.log(self.msg, "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Remove the duplicate values from the filter list if any and maintain the order of the list + valid_temp["global_filters"][filter_key] = list(dict.fromkeys(filter_value)) + # Set validated configuration and return success self.validated_config = valid_temp self.msg = ( "Successfully validated configuration for network profile wireless " - "playbook generation. Validated configuration: {0}".format(str(valid_temp)) + f"playbook generation. Validated configuration: {str(valid_temp)}" ) self.log( - "Input validation completed successfully. generate_all: {0}, " - "has_global_filters: {1}, file_mode: {2}".format( - bool(valid_temp.get("generate_all_configurations")), - bool(valid_temp.get("global_filters")), - self.params.get("file_mode", "overwrite") - ), + "Input validation completed successfully. " + f"has_global_filters: {bool(valid_temp.get('global_filters'))}, " + f"file_mode: {self.params.get('file_mode', 'overwrite')}", "INFO" ) @@ -851,14 +831,6 @@ def collect_all_wireless_profile_list(self, profile_names=None): self.have.setdefault("wireless_profile_info", {})[ profile_id ] = profile_info - self.log( - f"Profile {profile_index}/{len(profile_names)} '{profile}' " - "not found in wireless_profile_list with " - f"{len(self.have['wireless_profile_list'])} profiles. " - "Adding to non_existing_profiles list for batch " - "error reporting.", - "WARNING" - ) else: self.log( f"Profile ID not found for profile '{profile}' despite existence " @@ -1184,7 +1156,7 @@ def process_global_filters(self, global_filters): if profile_names and isinstance(profile_names, list): self.log( - f"Applying HIGHEST PRIORITY filter: profile_name_list with {len(profile_names)} profile(s): " + f"Applying filter: profile_name_list with {len(profile_names)} profile(s): " f"{profile_names}. Matching against wireless_profile_names with " f"{len(self.have.get('wireless_profile_names', []))} cached profile(s). " "Processing only profiles present in both lists.", @@ -1237,9 +1209,10 @@ def process_global_filters(self, global_filters): "types due to hierarchical priority.", "INFO" ) - elif day_n_templates and isinstance(day_n_templates, list): + + if day_n_templates and isinstance(day_n_templates, list): self.log( - "Applying SECOND PRIORITY filter: day_n_template_list " + "Applying filter: day_n_template_list " f"with {len(day_n_templates)} template(s): " f"{len(day_n_templates)}. Matching against wireless_profile_templates " f"with {len(self.have.get('wireless_profile_templates', {}))} cached profile(s). " @@ -1319,9 +1292,10 @@ def process_global_filters(self, global_filters): "filter types due to hierarchical priority.", "INFO" ) - elif site_list and isinstance(site_list, list): + + if site_list and isinstance(site_list, list): self.log( - "Applying THIRD PRIORITY filter: site_list with " + "Applying filter: site_list with " f"{len(site_list)} site(s): {site_list}. " "Matching against wireless_profile_sites with " f"{len(self.have.get('wireless_profile_sites', {}))} cached profile(s). " @@ -1400,9 +1374,10 @@ def process_global_filters(self, global_filters): "types due to hierarchical priority.", "INFO" ) - elif ssid_list and isinstance(ssid_list, list): + + if ssid_list and isinstance(ssid_list, list): self.log( - "Applying FOURTH PRIORITY filter: ssid_list with " + "Applying filter: ssid_list with " f"{len(ssid_list)} SSID(s): {ssid_list}. " "Matching against wireless_profile_info ssidDetails " f"with {len(self.have.get('wireless_profile_info', {}))} cached " @@ -1486,9 +1461,10 @@ def process_global_filters(self, global_filters): "types due to hierarchical priority.", "INFO" ) - elif ap_zone_list and isinstance(ap_zone_list, list): + + if ap_zone_list and isinstance(ap_zone_list, list): self.log( - f"Applying FIFTH PRIORITY filter: ap_zone_list with {len(ap_zone_list)} " + f"Applying filter: ap_zone_list with {len(ap_zone_list)} " f"AP zone(s): {ap_zone_list}. " "Matching against wireless_profile_info apZones with " f"{len(self.have.get('wireless_profile_info', {}))} cached profile(s). " @@ -1572,9 +1548,10 @@ def process_global_filters(self, global_filters): "types due to hierarchical priority.", "INFO" ) - elif feature_template_list and isinstance(feature_template_list, list): + + if feature_template_list and isinstance(feature_template_list, list): self.log( - f"Applying SIXTH PRIORITY filter: feature_template_list " + f"Applying filter: feature_template_list " f"with {len(feature_template_list)} template(s): " f"{feature_template_list}. Matching against wireless_profile_info featureTemplates " f"with {len(self.have.get('wireless_profile_info', {}))} cached " @@ -1661,9 +1638,10 @@ def process_global_filters(self, global_filters): "remaining filter types due to hierarchical priority.", "INFO" ) - elif additional_interface_list and isinstance(additional_interface_list, list): + + if additional_interface_list and isinstance(additional_interface_list, list): self.log( - f"Applying LOWEST PRIORITY filter: additional_interface_list with {len(additional_interface_list)} " + f"Applying filter: additional_interface_list with {len(additional_interface_list)} " f"interface(s): {additional_interface_list}. Matching against wireless_profile_info " f"additionalInterfaces with {len(self.have.get('wireless_profile_info', {}))} " "cached profile(s). Processing profiles " @@ -1749,12 +1727,6 @@ def process_global_filters(self, global_filters): "types processed.", "INFO" ) - else: - self.log( - "No specific global filters provided or no filters matched expected list " - "types. No filter-based processing performed. Filters received: {global_filters}", - "WARNING" - ) if not final_list: self.log( @@ -2101,9 +2073,6 @@ def yaml_config_generator(self, yaml_config_generator): Args: yaml_config_generator (dict): Configuration parameters containing: - - generate_all_configurations (bool, optional): - Auto-discovery mode flag enabling complete - infrastructure extraction - global_filters (dict, optional): Filter criteria with profile_name_list, day_n_template_list, site_list, ssid_list, @@ -2130,10 +2099,10 @@ def yaml_config_generator(self, yaml_config_generator): ) # Check if generate_all_configurations mode is enabled - generate_all = yaml_config_generator.get("generate_all_configurations", False) - if generate_all: + global_filters = yaml_config_generator.get("global_filters", False) + if not global_filters: self.log( - "Auto-discovery mode enabled (generate_all_configurations=True). Will " + "Auto-discovery mode enabled to generate all configurations. Will " "extract all wireless profiles with CLI templates, site assignments, " "SSIDs, AP zones, feature templates, and additional interfaces without " "filter restrictions for complete network wireless profile configuration.", @@ -2141,7 +2110,7 @@ def yaml_config_generator(self, yaml_config_generator): ) else: self.log( - "Targeted extraction mode (generate_all_configurations=False). Will " + "Targeted extraction mode (global_filters provided). Will " "apply provided global filters for selective profile and component " "retrieval based on filter criteria.", "DEBUG" @@ -2189,7 +2158,7 @@ def yaml_config_generator(self, yaml_config_generator): # Set empty filters to retrieve everything global_filters = {} final_list = [] - if generate_all: + if not global_filters: self.log( "Auto-discovery mode: Extracting all wireless profiles from cached " f"wireless_profile_names with {len(self.have.get('wireless_profile_names', []))} " @@ -2397,7 +2366,7 @@ def yaml_config_generator(self, yaml_config_generator): ) if yaml_config_generator.get("global_filters"): self.log( - "Warning: generate_all_configurations is False but global_filters " + "Warning: global_filters " "provided. This is expected for targeted extraction. Filters will be " "applied to retrieve matching profiles.", "DEBUG" @@ -2431,13 +2400,13 @@ def yaml_config_generator(self, yaml_config_generator): self.log( "No global_filters provided in targeted extraction mode. No profiles " "will be collected. Verify configuration includes either " - "generate_all_configurations=True or global_filters.", + "generate all configuration or global_filters.", "WARNING" ) if not final_list: self.log( - f"No configurations retrieved after processing. Auto-discovery mode: {generate_all}, " + f"No configurations retrieved after processing. " f"Global filters provided: {bool(yaml_config_generator.get('global_filters'))}. " "All filters may have excluded available " "profiles or no profiles exist in Catalyst Center.", @@ -2489,7 +2458,7 @@ def yaml_config_generator(self, yaml_config_generator): self.log( "YAML configuration generation completed successfully. Summary - " f"File: {file_path}, Profiles: {len(final_list)}, " - f"Auto-discovery: {generate_all}. Operation result set " + "Auto-discovery: generate all configurations. Operation result set " "to 'success'.", "INFO" ) @@ -3139,8 +3108,6 @@ def get_want(self, config, state): Args: config (dict): Configuration data containing: - - generate_all_configurations (bool, optional): Auto-discovery - mode flag for complete infrastructure extraction - global_filters (dict, optional): Filter criteria with profile_name_list, day_n_template_list, site_list, ssid_list, ap_zone_list, feature_template_list, @@ -3213,8 +3180,6 @@ def get_have(self, config): Args: config (dict): Configuration data containing: - - generate_all_configurations (bool, optional): Auto-discovery - mode flag enabling complete profile collection - global_filters (dict, optional): Filter criteria with profile_name_list, day_n_template_list, site_list, ssid_list, ap_zone_list, feature_template_list, additional_interface_list @@ -3248,13 +3213,14 @@ def get_have(self, config): self.log( "Configuration received with type verification passed. Checking for " - "generate_all_configurations flag to determine collection mode.", + "global_filters to determine collection mode.", "DEBUG" ) - if config.get("generate_all_configurations", False): + global_filters = config.get("global_filters") + if global_filters is None: self.log( - "Auto-discovery mode enabled (generate_all_configurations=True). " + "Auto-discovery mode enabled to generate all configurations. " "Collecting all wireless profile details without filter restrictions for " "complete network wireless profile configuration extraction.", "INFO" @@ -3297,7 +3263,6 @@ def get_have(self, config): "DEBUG" ) - global_filters = config.get("global_filters") if global_filters: self.log( f"Global filters provided: {global_filters}. Extracting filter criteria for profile " diff --git a/plugins/modules/network_settings_workflow_manager.py b/plugins/modules/network_settings_workflow_manager.py index 62975a8409..ebc23d584d 100644 --- a/plugins/modules/network_settings_workflow_manager.py +++ b/plugins/modules/network_settings_workflow_manager.py @@ -6938,6 +6938,12 @@ def reset_values(self): self.have.clear() self.want.clear() + self.result["response"] = [ + {"globalPool": {"response": {}, "msg": {}}}, + {"reservePool": {"response": {}, "msg": {}}}, + {"network": {"response": {}, "msg": {}}}, + {"device_controllability": {"response": {}, "msg": {}}} + ] return diff --git a/plugins/modules/sda_extranet_policies_playbook_config_generator.py b/plugins/modules/sda_extranet_policies_playbook_config_generator.py index 208053c53b..678fb32e22 100644 --- a/plugins/modules/sda_extranet_policies_playbook_config_generator.py +++ b/plugins/modules/sda_extranet_policies_playbook_config_generator.py @@ -456,6 +456,10 @@ def validate_input(self): return self self.auto_populate_and_validate_components_list(component_specific_filters) + # Deduplicate user-provided filters to avoid issuing redundant API calls + # for the same policy name. Note: API-level deduplication is done separately + # in get_extranet_policies_configuration() for paginated response overlap. + self.deduplicate_component_filters(component_specific_filters) # Set the validated configuration and update the result with success status self.validated_config = valid_temp @@ -534,10 +538,11 @@ def transform_fabric_site_ids_to_names(self, extranet_policy_details): - Other policy details (not processed by this method) Returns: - list[str]: Fabric site name hierarchies in order: + list[str] | None: Fabric site name hierarchies in order: - Format: "Global/Region/Site/Building" - Only includes successfully resolved site names - - Returns empty list if no fabricIds or resolution failures + - Returns None if no fabricIds found, or if all fabric IDs + fail to resolve (empty result list) Processing Flow: 1. Extract fabricIds list from policy details @@ -573,11 +578,10 @@ def transform_fabric_site_ids_to_names(self, extranet_policy_details): fabric_ids = extranet_policy_details.get("fabricIds", []) if not fabric_ids: self.log( - "No fabric IDs found in extranet policy " - "details, returning empty list", + "No fabric IDs found in extranet policy details, returning None", "DEBUG", ) - return [] + return None self.log( "Processing {0} fabric ID(s) for site name " @@ -618,7 +622,7 @@ def transform_fabric_site_ids_to_names(self, extranet_policy_details): ), "DEBUG", ) - return fabric_site_names + return fabric_site_names if fabric_site_names else None def extranet_policy_temp_spec(self): """ @@ -762,7 +766,7 @@ def get_extranet_policies_configuration(self, network_element, filters=None): ... ] } - Returns {"extranet_policies": []} if no policies found + Returns None if no policies found Processing Workflow: 1. Extract API family and function from network_element @@ -779,11 +783,16 @@ def get_extranet_policies_configuration(self, network_element, filters=None): 5. For full retrieval: - Execute paginated API call with empty params - Collect all policies from Catalyst Center - 6. Transform results: + 6. Deduplicate output policies before transformation using the unique policy + name (extranetPolicyName) as key: + - Track seen policy names in a set + - Skip policies with duplicate names + - Log count of removed duplicates if any + 7. Transform results: - Generate extranet_policy_temp_spec() - Apply modify_parameters(temp_spec, policies) - Convert API format to YAML format - 7. Return structured result dictionary + 8. Return structured result dictionary API Integration: - Family: sda @@ -823,8 +832,8 @@ def get_extranet_policies_configuration(self, network_element, filters=None): Error Handling: - API failures: Logged and propagated to calling function - - Empty results: Returns empty list, not error - - Invalid filter names: Logged as warning, skipped + - Empty results: Returns None, not an error + - Invalid filter names: Logged as a warning and skipped - Failed transformations: Logged and may cause failures Logging: @@ -915,11 +924,31 @@ def get_extranet_policies_configuration(self, network_element, filters=None): if not final_extranet_policies: self.log( "No extranet policies found matching the " - "specified filters. Returning empty " + "specified filters. Returning None " "result.", "WARNING", ) - return {"extranet_policies": []} + return None + + # Deduplicate output policies before transformation using the unique policy name as key + original_count = len(final_extranet_policies) + seen_policy_names = set() + deduped_policies = [] + for policy in final_extranet_policies: + policy_name = policy.get("extranetPolicyName") + if policy_name not in seen_policy_names: + seen_policy_names.add(policy_name) + deduped_policies.append(policy) + final_extranet_policies = deduped_policies + dedup_count = original_count - len(final_extranet_policies) + if dedup_count > 0: + self.log( + "Removed {0} duplicate extranet policy(ies) from API results. " + "Original count: {1}, After dedup: {2}".format( + dedup_count, original_count, len(final_extranet_policies) + ), + "INFO", + ) self.log( "Transforming {0} extranet policy(ies) using " diff --git a/plugins/modules/sda_fabric_sites_zones_playbook_config_generator.py b/plugins/modules/sda_fabric_sites_zones_playbook_config_generator.py index da77b9d59d..86a1376713 100644 --- a/plugins/modules/sda_fabric_sites_zones_playbook_config_generator.py +++ b/plugins/modules/sda_fabric_sites_zones_playbook_config_generator.py @@ -129,11 +129,11 @@ - |- Module result behavior (changed/ok/failed): The module result reflects local file state only, not Catalyst Center state. - In overwrite mode, the full file content is compared (excluding volatile - fields like timestamps and playbook path). In append mode, only the last - YAML document in the file is compared against the newly generated - configuration. If a file contains multiple config entries from previous - appends, only the most recent entry is used for the idempotency check. + In overwrite mode, the full generated YAML content is compared against the + existing file after excluding generated header comment lines. In append mode, + only the last YAML document in the file is compared against the newly generated + configuration. If a file contains multiple config entries from previous appends, + only the most recent entry is used for the idempotency check. - changed=true (status: success): The generated YAML configuration differs from the existing output file (or the file does not exist). The file was written and the configuration was updated. @@ -144,6 +144,11 @@ API failure, or file write error. No file was written or modified. Note: Re-running with identical inputs and unchanged Catalyst Center state will produce changed=false, ensuring idempotent playbook behavior. + Note: If append mode creates multiple config entries in the + generated file, replaying the file as config in the workflow + manager module applies only the last config entry because + yaml.safe_load uses last-key-wins semantics for duplicate + keys in a single YAML document. seealso: - module: cisco.dnac.sda_fabric_sites_zones_workflow_manager description: Module to manage SD-Access Fabric Sites and Zones in Cisco Catalyst Center. @@ -432,14 +437,14 @@ def get_workflow_filters_schema(self): schema = { "network_elements": { "fabric_sites": { - "filters": ["site_name_hierarchy"], + "filters": {"site_name_hierarchy": {"type": "str"}}, "reverse_mapping_function": self.fabric_site_temp_spec, "api_function": "get_fabric_sites", "api_family": "sda", "get_function_name": self.get_fabric_sites_from_ccc, }, "fabric_zones": { - "filters": ["site_name_hierarchy"], + "filters": {"site_name_hierarchy": {"type": "str"}}, "reverse_mapping_function": self.fabric_zone_temp_spec, "api_function": "get_fabric_zones", "api_family": "sda", @@ -1012,11 +1017,9 @@ def yaml_config_generator(self, yaml_config_generator): "DEBUG" ) - additional_config_headers = None - if generate_all: - additional_config_headers = [ - "Full configuration generates all fabric sites first, followed by all fabric zones." - ] + additional_config_headers = [ + "Full configuration generates all fabric sites first, followed by all fabric zones." + ] file_written = self.write_dict_to_yaml( yaml_config_dict, diff --git a/plugins/modules/sda_fabric_transits_playbook_config_generator.py b/plugins/modules/sda_fabric_transits_playbook_config_generator.py index 0d6f79242b..e5ac1bbfdc 100644 --- a/plugins/modules/sda_fabric_transits_playbook_config_generator.py +++ b/plugins/modules/sda_fabric_transits_playbook_config_generator.py @@ -126,11 +126,11 @@ - |- Module result behavior (changed/ok/failed): The module result reflects local file state only, not Catalyst Center state. - In overwrite mode, the full file content is compared (excluding volatile - fields like timestamps and playbook path). In append mode, only the last - YAML document in the file is compared against the newly generated - configuration. If a file contains multiple config entries from previous - appends, only the most recent entry is used for the idempotency check. + In overwrite mode, the full generated YAML content is compared against the + existing file after excluding generated header comment lines. In append mode, + only the last YAML document in the file is compared against the newly generated + configuration. If a file contains multiple config entries from previous appends, + only the most recent entry is used for the idempotency check. - changed=true (status: success): The generated YAML configuration differs from the existing output file (or the file does not exist). The file was written and the configuration was updated. @@ -141,6 +141,11 @@ API failure, or file write error. No file was written or modified. Note: Re-running with identical inputs and unchanged Catalyst Center state will produce changed=false, ensuring idempotent playbook behavior. + Note: If append mode creates multiple config entries in the + generated file, replaying the file as config in the workflow + manager module applies only the last config entry because + yaml.safe_load uses last-key-wins semantics for duplicate + keys in a single YAML document. seealso: - module: cisco.dnac.sda_fabric_transits_workflow_manager description: Module for managing fabric transits in Cisco Catalyst Center. @@ -418,7 +423,10 @@ def get_workflow_filters_schema(self): schema = { "network_elements": { "sda_fabric_transits": { - "filters": ["name", "transit_type"], + "filters": { + "name": {"type": "str"}, + "transit_type": {"type": "str"} + }, "reverse_mapping_function": self.fabric_transit_temp_spec, "api_function": "get_transit_networks", "api_family": "sda", @@ -737,7 +745,7 @@ def get_fabric_transits_configuration(self, network_element, filters): modified_fabric_transits_details = {} if transit_details: - modified_fabric_transits_details["fabric_transits"] = transit_details + modified_fabric_transits_details["sda_fabric_transits"] = transit_details self.log( "Completed retrieving fabric transit(s): {0}".format( diff --git a/plugins/modules/sda_fabric_virtual_networks_playbook_config_generator.py b/plugins/modules/sda_fabric_virtual_networks_playbook_config_generator.py index cdf3aa5a97..dab9dfae6c 100644 --- a/plugins/modules/sda_fabric_virtual_networks_playbook_config_generator.py +++ b/plugins/modules/sda_fabric_virtual_networks_playbook_config_generator.py @@ -171,11 +171,11 @@ - |- Module result behavior (changed/ok/failed): The module result reflects local file state only, not Catalyst Center state. - In overwrite mode, the full file content is compared (excluding volatile - fields like timestamps and playbook path). In append mode, only the last - YAML document in the file is compared against the newly generated - configuration. If a file contains multiple config entries from previous - appends, only the most recent entry is used for the idempotency check. + In overwrite mode, the full generated YAML content is compared against the + existing file after excluding generated header comment lines. In append mode, + only the last YAML document in the file is compared against the newly generated + configuration. If a file contains multiple config entries from previous appends, + only the most recent entry is used for the idempotency check. - changed=true (status: success): The generated YAML configuration differs from the existing output file (or the file does not exist). The file was written and the configuration was updated. @@ -186,6 +186,11 @@ API failure, or file write error. No file was written or modified. Note: Re-running with identical inputs and unchanged Catalyst Center state will produce changed=false, ensuring idempotent playbook behavior. + Note: If append mode creates multiple config entries in the + generated file, replaying the file as config in the workflow + manager module applies only the last config entry because + yaml.safe_load uses last-key-wins semantics for duplicate + keys in a single YAML document. seealso: - module: cisco.dnac.sda_fabric_virtual_networks_workflow_manager description: Module for managing fabric VLANs, Virtual Networks, @@ -687,21 +692,31 @@ def get_workflow_elements_schema(self): schema = { "network_elements": { "fabric_vlan": { - "filters": ["vlan_name", "vlan_id"], + "filters": { + "vlan_name": {"type": "str"}, + "vlan_id": {"type": "int"} + }, "reverse_mapping_function": self.fabric_vlan_temp_spec, "api_function": "get_layer2_virtual_networks", "api_family": "sda", "get_function_name": self.get_fabric_vlans_configuration, }, "virtual_networks": { - "filters": ["vn_name"], + "filters": { + "vn_name": {"type": "str"} + }, "reverse_mapping_function": self.virtual_network_temp_spec, "api_function": "get_layer3_virtual_networks", "api_family": "sda", "get_function_name": self.get_virtual_networks_configuration, }, "anycast_gateways": { - "filters": ["vn_name", "vlan_id", "vlan_name", "ip_pool_name"], + "filters": { + "vn_name": {"type": "str"}, + "vlan_id": {"type": "int"}, + "vlan_name": {"type": "str"}, + "ip_pool_name": {"type": "str"} + }, "reverse_mapping_function": self.anycast_gateway_temp_spec, "api_function": "get_anycast_gateways", "api_family": "sda", diff --git a/plugins/modules/site_playbook_config_generator.py b/plugins/modules/site_playbook_config_generator.py index b8ce40ea82..f9b0ca1b6e 100644 --- a/plugins/modules/site_playbook_config_generator.py +++ b/plugins/modules/site_playbook_config_generator.py @@ -552,10 +552,20 @@ def validate_input(self): valid_temp = self.validate_config_dict(self.config, temp_spec) self.validate_invalid_params(self.config, temp_spec.keys()) - # Auto-populate components_list from component filters if needed + # component_specific_filters is mandatory when config is provided. + # Catches both missing (None) and empty ({}) - same pattern as tags module. component_specific_filters = valid_temp.get("component_specific_filters") - if component_specific_filters: - self.auto_populate_and_validate_components_list(component_specific_filters) + if not component_specific_filters: + self.msg = ( + "'component_specific_filters' is required when 'config' is provided and must not be empty. " + "Either omit 'config' entirely to generate all configurations, " + "or define 'component_specific_filters' with at least one filter block (e.g., 'site')." + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Auto-populate components_list from component filters if needed + self.auto_populate_and_validate_components_list(component_specific_filters) invalid_params = self.validate_component_specific_filters_structure(valid_temp) if invalid_params: @@ -4004,7 +4014,7 @@ def resolve_component_filters(self, component_specific_filters): Supported payload: - Helper-wrapped form from BrownFieldHelper: - `{"global_filters": {...}, "component_specific_filters": [...]}`. + `{" ": {...}, "component_specific_filters": [...]}`. - Direct list form for internal/unit-test invocation: `[{"site_name_hierarchy": ...}, ...]`. diff --git a/plugins/modules/site_workflow_manager.py b/plugins/modules/site_workflow_manager.py index ca3eb41da7..f65778eda3 100644 --- a/plugins/modules/site_workflow_manager.py +++ b/plugins/modules/site_workflow_manager.py @@ -2049,8 +2049,6 @@ def get_diff_merged(self, config): except Exception as e: self.log("Yaml is not available for bulk: {}".format(str(e)), "ERROR") - return self - else: site_params = self.want.get("site_params") site_type = site_params.get("type") @@ -2158,7 +2156,6 @@ def get_diff_merged(self, config): site_name_hierarchy = self.want.get("site_name_hierarchy") self.created_site_list.append(str(site_type) + ": " + str(site_name_hierarchy)) self.log("Site '{0}' created successfully".format(site_name_hierarchy), "INFO") - return self except Exception as e: self.msg = "Unexpected error occurred while create: {0}".format(str(e)) @@ -2166,6 +2163,12 @@ def get_diff_merged(self, config): self.set_operation_result("failed", False, self.msg, "ERROR", site_name_hierarchy).check_return_status() + if self.created_site_list and len(self.update_not_needed_sites) < 1: + self.log(self.msg, "INFO") + self.set_operation_result("success", True, self.msg, "INFO", str(self.created_site_list)) + elif len(self.update_not_needed_sites) > 0: + self.update_site_messages().check_return_status() + return self def delete_single_site(self, site_id, site_name_hierarchy): diff --git a/plugins/modules/template_playbook_config_generator.py b/plugins/modules/template_playbook_config_generator.py index 4f2e3919c2..6a7ae6bd3d 100644 --- a/plugins/modules/template_playbook_config_generator.py +++ b/plugins/modules/template_playbook_config_generator.py @@ -148,11 +148,11 @@ - |- Module result behavior (changed/ok/failed): The module result reflects local file state only, not Catalyst Center state. - In overwrite mode, the full file content is compared (excluding volatile - fields like timestamps and playbook path). In append mode, only the last - YAML document in the file is compared against the newly generated - configuration. If a file contains multiple config entries from previous - appends, only the most recent entry is used for the idempotency check. + In overwrite mode, the full generated YAML content is compared against the + existing file after excluding generated header comment lines. In append mode, + only the last YAML document in the file is compared against the newly generated + configuration. If a file contains multiple config entries from previous appends, + only the most recent entry is used for the idempotency check. - changed=true (status: success): The generated YAML configuration differs from the existing output file (or the file does not exist). The file was written and the configuration was updated. @@ -163,6 +163,11 @@ API failure, or file write error. No file was written or modified. Note: Re-running with identical inputs and unchanged Catalyst Center state will produce changed=false, ensuring idempotent playbook behavior. + Note: If append mode creates multiple config entries in the + generated file, replaying the file as config in the workflow + manager module applies only the last config entry because + yaml.safe_load uses last-key-wins semantics for duplicate + keys in a single YAML document. seealso: - module: cisco.dnac.template_workflow_manager description: Module for managing template projects and templates. @@ -537,18 +542,20 @@ def get_workflow_elements_schema(self): schema = { "network_elements": { "projects": { - "filters": ["name"], + "filters": { + "name": {"type": "str"} + }, "reverse_mapping_function": self.projects_temp_spec, "api_function": "get_projects_details", "api_family": "configuration_templates", "get_function_name": self.get_template_projects_details }, "configuration_templates": { - "filters": [ - "template_name", - "project_name", - "include_uncommitted" - ], + "filters": { + "template_name": {"type": "str"}, + "project_name": {"type": "str"}, + "include_uncommitted": {"type": "bool"} + }, "reverse_mapping_function": self.templates_temp_spec, "api_function": "get_templates_details", "api_family": "configuration_templates", diff --git a/plugins/modules/template_workflow_manager.py b/plugins/modules/template_workflow_manager.py index 3fbda5bb7e..0478b2c65f 100644 --- a/plugins/modules/template_workflow_manager.py +++ b/plugins/modules/template_workflow_manager.py @@ -1301,6 +1301,61 @@ the template is built using multiple smaller templates. type: bool + member_template_deployment_info: + description: + - A list of member template deployment + details used when deploying a composite + template. + - Each entry describes a child template + and its deployment parameters. + - Required when is_composite is set to + true. + type: list + elements: dict + suboptions: + project_name: + description: + - Name of the project under which the member + template resides in Catalyst Center. + - Matched against the 'name' field of projects + returned by the Catalyst Center API. + - If omitted, defaults to the parent template's + project_name, allowing member templates in the + same project to be referenced without + repetition. + - Applies per member entry in the + member_template_deployment_info list. + - For example, 'Composite_Project'. + type: str + template_name: + description: Name of the member template + to deploy. + type: str + required: true + force_push_template: + description: Whether to force push the + member template to the device even if + the template has already been applied. + type: bool + default: true + copy_config: + description: Whether to copy the running + configuration to startup after applying + the member template. + type: bool + default: true + template_parameters: + description: A list of parameter name-value + pairs for customizing the member template. + type: list + elements: dict + suboptions: + param_name: + description: Name of the parameter. + type: str + param_value: + description: Value for the parameter. + type: str copy_config: description: - A boolean flag that specifies whether @@ -2078,6 +2133,76 @@ software_type: "IOS-XE" device_types: - product_family: Switches and Hubs + +- name: Deploy a composite template with member templates to devices based on device specific details + cisco.dnac.template_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + dnac_log_append: true + validate_response_schema: false + state: "merged" + config_verify: true + config: + - deploy_template: + project_name: "composite_project" + template_name: "composite_template" + is_composite: true + force_push: true + member_template_deployment_info: + - project_name: "composite_project" + template_name: "containing_template1" + copy_config: false + - project_name: "composite_project" + template_name: "containing_template2" + copy_config: false + device_details: + device_ips: + - 10.1.1.1 + +- name: Deploy a composite template with member templates using template_parameters to devices based on device specific details + cisco.dnac.template_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + dnac_log_append: true + validate_response_schema: false + state: "merged" + config_verify: true + config: + - deploy_template: + project_name: "composite_project" + template_name: "composite_template" + is_composite: true + force_push: true + member_template_deployment_info: + - project_name: "composite_project" + template_name: "containing_template1" + copy_config: false + template_parameters: + - param_name: "name" + param_value: "abc" + - project_name: "composite_project" + template_name: "containing_template2" + copy_config: false + template_parameters: + - param_name: "name" + param_value: "xyz" + device_details: + device_ips: + - 10.1.1.1 """ RETURN = r""" @@ -2353,6 +2478,20 @@ def validate_input(self): "template_name": {"type": "str"}, "force_push": {"type": "bool"}, "is_composite": {"type": "bool"}, + "member_template_deployment_info": { + "type": "list", + "elements": "dict", + "template_name": {"type": "str"}, + "project_name": {"type": "str"}, + "force_push_template": {"type": "bool", "default": True}, + "copy_config": {"type": "bool", "default": True}, + "template_parameters": { + "type": "list", + "elements": "dict", + "param_name": {"type": "str"}, + "param_value": {"type": "str"}, + }, + }, "copy_config": {"type": "bool", "default": True}, "template_parameters": { "type": "list", @@ -5734,12 +5873,144 @@ def create_payload_for_template_deploy(self, deploy_temp_details, device_ids): project_name, template_name, template_id ).check_return_status() + is_composite = deploy_temp_details.get("is_composite", False) + self.log( + "Preparing deployment payload for" + " template '{0}', is_composite={1}.".format( + template_name, is_composite), + "DEBUG", + ) + member_deployments = [] + + self.log( + "Preparing deployment payload for" + " template '{0}', is_composite={1}.".format( + template_name, is_composite), + "DEBUG", + ) deploy_payload = { "forcePushTemplate": deploy_temp_details.get("force_push", False), - "isComposite": deploy_temp_details.get("is_composite", False), + "isComposite": is_composite, "templateId": template_id, "copyingConfig": deploy_temp_details.get("copy_config", True), } + + # For composite templates, set mainTemplateId and build memberTemplateDeploymentInfo + if is_composite: + self.log( + "Processing composite template deployment" + " for template '{0}'.".format(template_name), + "INFO", + ) + deploy_payload["mainTemplateId"] = template_id + # Resolve versioned template ID for the composite parent + composite_version_id = self.get_latest_template_version_id(template_id, template_name) + self.log( + "Resolved composite template '{0}'" + " version ID: '{1}'.".format( + template_name, + composite_version_id or template_id), + "DEBUG", + ) + + if composite_version_id: + deploy_payload["templateId"] = composite_version_id + + member_info_list = deploy_temp_details.get("member_template_deployment_info", []) + if not member_info_list: + self.msg = ( + "Composite template '{0}' requires 'member_template_deployment_info' " + "to be provided in the playbook." + ).format(template_name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + for idx, member in enumerate(member_info_list): + member_template_name = member.get("template_name") + self.log( + "Processing member template {0}/{1}" + " with name '{2}' for composite" + " template '{3}'.".format( + idx + 1, len(member_info_list), + member_template_name, template_name), + "DEBUG", + ) + if not member_template_name: + self.msg = ( + "Each entry in 'member_template_deployment_info' must include a 'template_name' " + "for composite template '{0}'." + ).format(template_name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + member_project_name = member.get("project_name") or project_name + member_response = self.get_project_defined_template_details( + member_project_name, member_template_name + ) + member_templates = member_response.get("response") if member_response else None + if not member_templates or not isinstance(member_templates, list): + self.msg = ( + "Member template '{0}' not found under project '{1}' or it is not versioned." + ).format(member_template_name, member_project_name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + member_template_id = member_templates[0].get("id") + if not member_template_id: + self.msg = ( + "Member template '{0}' under" + " project '{1}' has no valid ID." + ).format( + member_template_name, member_project_name + ) + self.set_operation_result( + "failed", False, self.msg, "ERROR" + ).check_return_status() + self.log("Resolved member template '{0}' to ID '{1}'.".format( + member_template_name, member_template_id), "DEBUG") + + member_version_id = self.get_latest_template_version_id(member_template_id, member_template_name) + if not member_version_id: + member_version_id = member_template_id + + member_params = {} + member_template_params = member.get("template_parameters", []) + for idx, param in enumerate(member_template_params): + self.log( + "Processing parameter {0}/{1} for" + " member template '{2}'.".format( + idx + 1, len(member_template_params), + member_template_name), + "DEBUG", + ) + p_name = param.get("param_name") + p_value = param.get("param_value") + + if not p_name: + self.msg = ( + "Each template parameter in member" + " template '{0}' must include" + " 'param_name'." + ).format(member_template_name) + self.set_operation_result( + "failed", False, self.msg, "ERROR" + ).check_return_status() + + member_params[p_name] = p_value + + member_deploy = { + "forcePushTemplate": member.get("force_push_template", True), + "isComposite": False, + "templateId": member_version_id, + "copyingConfig": member.get("copy_config", True), + "targetInfo": [], + } + + member_deployments.append({ + "deploy": member_deploy, + "params": member_params, + "version_id": member_version_id, + }) + + self.log("Built {0} member template deployment entries for composite template '{1}'.".format( + len(member_deployments), template_name), "DEBUG") self.log( "Handling template parameters for the deployment of template '{0}'.".format( template_name @@ -5761,14 +6032,16 @@ def create_payload_for_template_deploy(self, deploy_temp_details, device_ids): ) template_dict[name] = value - # Get the latest version template ID - version_template_id = self.get_latest_template_version_id(template_id, template_name) - if not version_template_id: - self.log( - "No versioning found for the template: {0}".format(template_name), - "INFO", - ) - version_template_id = template_id + # Get the latest version template ID (only needed for non-composite deployments) + version_template_id = None + if not is_composite: + version_template_id = self.get_latest_template_version_id(template_id, template_name) + if not version_template_id: + self.log( + "Using base template ID for '{0}' — no committed version found in Catalyst Center.".format(template_name), + "INFO", + ) + version_template_id = template_id self.log("Preparing to deploy template '{0}' to the following device IDs: '{1}'".format(template_name, device_ids), "DEBUG") for device_id in device_ids: @@ -5779,9 +6052,10 @@ def create_payload_for_template_deploy(self, deploy_temp_details, device_ids): target_device_dict = { "id": device_id, "type": "MANAGED_DEVICE_UUID", - "versionedTemplateId": version_template_id, "params": template_dict, } + if not is_composite: + target_device_dict["versionedTemplateId"] = version_template_id resource_params = deploy_temp_details.get("resource_parameters") self.log("Handling resource parameters for the deployment of template '{0}'.".format(template_name), "DEBUG") resource_params_list = [] @@ -5850,13 +6124,111 @@ def create_payload_for_template_deploy(self, deploy_temp_details, device_ids): del target_device_dict deploy_payload["targetInfo"] = target_info_list + + # For composite templates, populate memberTemplateDeploymentInfo with per-device target info + if is_composite: + member_template_deployment_info = [] + for idx, member_entry in enumerate(member_deployments): + self.log( + "Building target info for member" + " deployment {0}/{1}, templateId" + " '{2}'.".format( + idx + 1, len(member_deployments), + member_entry.get("version_id")), + "DEBUG", + ) + member_deploy = member_entry["deploy"] + member_params = member_entry["params"] + member_version_id = member_entry["version_id"] + member_resource_params = deploy_temp_details.get("resource_parameters", []) + + member_target_info_list = [] + for dev_idx, device_id in enumerate(device_ids): + self.log( + "Preparing member deployment target" + " {0}/{1} for device_id '{2}'" + " in member template" + " '{3}'.".format( + dev_idx + 1, len(device_ids), + device_id, + member_deploy.get("templateId")), + "DEBUG", + ) + member_target = { + "id": device_id, + "type": "MANAGED_DEVICE_UUID", + "params": member_params, + } + + member_res_list = [] + runtime_scopes_available = ["MANAGED_DEVICE_UUID", "MANAGED_DEVICE_IP", "MANAGED_DEVICE_HOSTNAME", "SITE_UUID"] + for res_idx, resource_param in enumerate(member_resource_params): + r_type = resource_param.get("resource_type") + self.log( + "Resolving resource param" + " {0}/{1} type='{2}' for" + " device '{3}'.".format( + res_idx + 1, + len(member_resource_params), + r_type, device_id), + "DEBUG", + ) + scope = resource_param.get("resource_scope", "RUNTIME") + resource_params_dict = {"type": r_type, "scope": scope} + if scope == "RUNTIME": + if r_type not in runtime_scopes_available: + self.msg = ( + "The resource type '{0}' with scope '{1}' is not supported for runtime provisioning. " + "Supported types are: {2}." + ).format(r_type, scope, ", ".join(runtime_scopes_available)) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if r_type == "SITE_UUID": + value = self.get_site_uuid_from_device_id(device_id) + elif r_type == "MANAGED_DEVICE_UUID": + value = device_id + elif r_type == "MANAGED_DEVICE_IP": + device_ip_id_map = self.get_device_ips_from_device_ids([device_id]) + value = device_ip_id_map[device_id] + elif r_type == "MANAGED_DEVICE_HOSTNAME": + value = self.get_device_hostname_from_device_id(device_id) + + resource_params_dict['value'] = value + member_res_list.append(resource_params_dict) + self.log("Resolved runtime resource parameter '{0}' with scope '{1}' to value '{2}' for device '{3}'.".format( + r_type, scope, value, device_id), "DEBUG") + continue + + self.log("Processing resource parameter with type '{0}' and scope '{1}'.".format(r_type, scope), "DEBUG") + value = resource_param.get("resource_value") + if not value: + self.msg = ( + "The resource type '{0}' with scope '{1}' requires a value to be provided. " + "Please specify a value for this resource parameter." + ).format(r_type, scope) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + resource_params_dict["value"] = value + member_res_list.append(resource_params_dict) + + if member_res_list: + member_target["resourceParams"] = member_res_list + member_target_info_list.append(member_target) + + member_deploy["targetInfo"] = member_target_info_list + member_template_deployment_info.append(member_deploy) + + deploy_payload["memberTemplateDeploymentInfo"] = member_template_deployment_info + self.log("Added {0} member template deployment info entries to composite payload.".format( + len(member_template_deployment_info)), "DEBUG") + self.log( "Successfully generated deployment payload for template '{0}'.".format( template_name ), "INFO", ) - + self.log("Final deployment payload for template '{0}': {1}".format(template_name, deploy_payload), "DEBUG") return deploy_payload def monitor_template_deployment_status( @@ -6074,7 +6446,7 @@ def deploy_template_to_devices( "DEBUG", ) match = re.search( - r"Template\s+Deployemnt\s+Id:\s+([a-f0-9\-]+)", + r"Template\s+Deployment\s+Id:\s+([a-f0-9\-]+)", progress, re.IGNORECASE, ) @@ -6097,6 +6469,16 @@ def deploy_template_to_devices( self.monitor_template_deployment_status( template_name, deployment_id, device_ips ).check_return_status() + self.log( + "Deployment monitoring completed" + " for template '{0}' with" + " deployment ID '{1}'." + " Returning.".format( + template_name, + deployment_id), + "INFO", + ) + return self else: self.log( "Regex matched the progress message, but no Deployment ID was captured. " @@ -6107,13 +6489,22 @@ def deploy_template_to_devices( ) else: self.log( - "Deployment ID not found in the progress message. This could indicate that the template '{0}' is already deployed with" - " same parameters, Hence not deploying on devices. Progress message: '{1}'.".format( + "Deployment ID not found in the progress message for template '{0}'. Progress message: '{1}'.".format( template_name, progress ), - "WARNING", + "DEBUG", ) + # Check for task-level success even without a deployment ID + is_task_end = task_details.get("isError") + if is_task_end is False and "endTime" in task_details and task_details.get("endTime"): + self.msg = ( + "Given template '{0}' deployed successfully to all the device(s) '{1}' " + " in the Cisco Catalyst Center." + ).format(template_name, device_ips) + self.set_operation_result("success", True, self.msg, "INFO") + return self + if "already deployed with same params" in progress: self.msg = "Template '{0}' is already deployed with the same parameters. No deployment actions will be performed.".format( template_name diff --git a/plugins/modules/wired_campus_automation_workflow_manager.py b/plugins/modules/wired_campus_automation_workflow_manager.py index 5d6e745d75..f85ca74c97 100644 --- a/plugins/modules/wired_campus_automation_workflow_manager.py +++ b/plugins/modules/wired_campus_automation_workflow_manager.py @@ -472,7 +472,7 @@ type: int required: false default: 20 - stp_instace_hello_interval_timer: + stp_instance_hello_interval_timer: description: - Hello interval timer for this STP instance in seconds. - Must be between 1 and 10 seconds. @@ -482,7 +482,7 @@ type: int required: false default: 2 - stp_instace_forward_delay_timer: + stp_instance_forward_delay_timer: description: - Forward delay timer for this STP instance in seconds. - Must be between 4 and 30 seconds. diff --git a/plugins/modules/wireless_design_playbook_config_generator.py b/plugins/modules/wireless_design_playbook_config_generator.py index f02f0fb388..f2a3e45141 100644 --- a/plugins/modules/wireless_design_playbook_config_generator.py +++ b/plugins/modules/wireless_design_playbook_config_generator.py @@ -243,11 +243,11 @@ - |- Module result behavior (changed/ok/failed): The module result reflects local file state only, not Catalyst Center state. - In overwrite mode, the full file content is compared (excluding volatile - fields like timestamps and playbook path). In append mode, only the last - YAML document in the file is compared against the newly generated - configuration. If a file contains multiple config entries from previous - appends, only the most recent entry is used for the idempotency check. + In overwrite mode, the full generated YAML content is compared against the + existing file after excluding generated header comment lines. In append mode, + only the last YAML document in the file is compared against the newly generated + configuration. If a file contains multiple config entries from previous appends, + only the most recent entry is used for the idempotency check. - changed=true (status: success): The generated YAML configuration differs from the existing output file (or the file does not exist). The file was written and the configuration was updated. @@ -258,6 +258,11 @@ API failure, or file write error. No file was written or modified. Note: Re-running with identical inputs and unchanged Catalyst Center state will produce changed=false, ensuring idempotent playbook behavior. + Note: If append mode creates multiple config entries in the + generated file, replaying the file as config in the workflow + manager module applies only the last config entry because + yaml.safe_load uses last-key-wins semantics for duplicate + keys in a single YAML document. seealso: - module: cisco.dnac.wireless_design_workflow_manager description: Module for managing wireless design and feature template config. diff --git a/tests/unit/modules/dnac/fixtures/sda_extranet_policies_playbook_config_generator.json b/tests/unit/modules/dnac/fixtures/sda_extranet_policies_playbook_config_generator.json index 5c25cfce5c..105355a0f9 100644 --- a/tests/unit/modules/dnac/fixtures/sda_extranet_policies_playbook_config_generator.json +++ b/tests/unit/modules/dnac/fixtures/sda_extranet_policies_playbook_config_generator.json @@ -106,5 +106,21 @@ } ], "version": "1.0" + }, + "filter_not_found_case": { + "component_specific_filters": { + "components_list": [ + "extranet_policies" + ], + "extranet_policies": [ + { + "extranet_policy_name": "NonExistent_Policy" + } + ] + } + }, + "get_extranet_policies_not_found_response": { + "response": [], + "version": "1.0" } } diff --git a/tests/unit/modules/dnac/fixtures/sda_fabric_sites_zones_playbook_config_generator.json b/tests/unit/modules/dnac/fixtures/sda_fabric_sites_zones_playbook_config_generator.json index fc0bb0fe90..64c384d191 100644 --- a/tests/unit/modules/dnac/fixtures/sda_fabric_sites_zones_playbook_config_generator.json +++ b/tests/unit/modules/dnac/fixtures/sda_fabric_sites_zones_playbook_config_generator.json @@ -174,10 +174,25 @@ ], "fabric_sites": [ { - "site_name_hierarchy": "Global/Invalid_Site", - "fabric_type": "fabric_site", - "authentication_profile": "No Authentication", - "is_pub_sub_enabled": false + "site_name_hierarchy": "Global/Invalid_Site" + } + ] + } + }, + "playbook_config_invalid_component": { + "component_specific_filters": { + "invalid_component": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, + "playbook_config_invalid_component_filters": { + "component_specific_filters": { + "fabric_sites": [ + { + "invalid_filter": "Invalid Value" } ] } diff --git a/tests/unit/modules/dnac/fixtures/sda_fabric_transits_playbook_config_generator.json b/tests/unit/modules/dnac/fixtures/sda_fabric_transits_playbook_config_generator.json index a88ab6febc..b7714ed43d 100644 --- a/tests/unit/modules/dnac/fixtures/sda_fabric_transits_playbook_config_generator.json +++ b/tests/unit/modules/dnac/fixtures/sda_fabric_transits_playbook_config_generator.json @@ -158,6 +158,24 @@ "playbook_config_empty_component_specific_filters": { "component_specific_filters": {} }, + "playbook_config_invalid_component": { + "component_specific_filters": { + "invalid_component": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, + "playbook_config_invalid_component_filters": { + "component_specific_filters": { + "sda_fabric_transits": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, "get_device_details": { "response": [ { diff --git a/tests/unit/modules/dnac/fixtures/sda_fabric_virtual_networks_playbook_config_generator.json b/tests/unit/modules/dnac/fixtures/sda_fabric_virtual_networks_playbook_config_generator.json index 0cbf1c38c5..89f8e1d92f 100644 --- a/tests/unit/modules/dnac/fixtures/sda_fabric_virtual_networks_playbook_config_generator.json +++ b/tests/unit/modules/dnac/fixtures/sda_fabric_virtual_networks_playbook_config_generator.json @@ -284,6 +284,24 @@ "playbook_config_empty_component_specific_filters": { "component_specific_filters": {} }, + "playbook_config_invalid_component": { + "component_specific_filters": { + "invalid_component": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, + "playbook_config_invalid_component_filters": { + "component_specific_filters": { + "fabric_vlan": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, "get_empty_fabric_vlan_response": { "response": [], "version": "1.0" diff --git a/tests/unit/modules/dnac/fixtures/site_playbook_config_generator.json b/tests/unit/modules/dnac/fixtures/site_playbook_config_generator.json index 0ad5c43adb..61a77fd56b 100644 --- a/tests/unit/modules/dnac/fixtures/site_playbook_config_generator.json +++ b/tests/unit/modules/dnac/fixtures/site_playbook_config_generator.json @@ -268,6 +268,9 @@ ] } }, + "playbook_config_empty_component_specific_filters": { + "component_specific_filters": {} + }, "playbook_config_no_file_path": { "component_specific_filters": { "components_list": [ diff --git a/tests/unit/modules/dnac/fixtures/template_playbook_config_generator.json b/tests/unit/modules/dnac/fixtures/template_playbook_config_generator.json index 567dc5f5a3..2d5d288dff 100644 --- a/tests/unit/modules/dnac/fixtures/template_playbook_config_generator.json +++ b/tests/unit/modules/dnac/fixtures/template_playbook_config_generator.json @@ -154,6 +154,24 @@ "playbook_config_empty_component_specific_filters": { "component_specific_filters": {} }, + "playbook_config_invalid_component": { + "component_specific_filters": { + "invalid_component": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, + "playbook_config_invalid_component_filters": { + "component_specific_filters": { + "projects": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, "get_empty_projects_response": { "response": [], "version": "1.0" diff --git a/tests/unit/modules/dnac/fixtures/template_workflow_manager.json b/tests/unit/modules/dnac/fixtures/template_workflow_manager.json index 3d2d6b05d0..2f068b8432 100644 --- a/tests/unit/modules/dnac/fixtures/template_workflow_manager.json +++ b/tests/unit/modules/dnac/fixtures/template_workflow_manager.json @@ -2582,5 +2582,104 @@ ] } } + ], + + "deploy_composite_template_playbook_case_13": [ + { + "deploy_template": { + "project_name": "Composite_Project", + "template_name": "Composite_Parent_Template", + "is_composite": true, + "template_parameters": [ + {"param_name": "vlan", "param_value": "100"} + ], + "member_template_deployment_info": [ + { + "template_name": "Member_Template_1", + "project_name": "Composite_Project" + } + ], + "device_details": { + "device_ips": ["10.10.10.1"] + } + } + } + ], + + "get_projects_details_case_13": { + "response": [ + { + "name": "Composite_Project", + "id": "proj-comp-001", + "templates": [ + {"name": "Composite_Parent_Template", "id": "tmpl-parent-001", "composite": true}, + {"name": "Member_Template_1", "id": "tmpl-member-001", "composite": false} + ] + } + ] + }, + + "get_templates_details_case_13": { + "response": [ + {"name": "Composite_Parent_Template", "id": "tmpl-parent-001", "projectName": "Composite_Project", "composite": true} + ] + }, + + "get_device_by_ip_case_13": { + "response": [{"id": "device-001", "managementIpAddress": "10.10.10.1"}] + }, + + "get_template_versions_parent_case_13": { + "response": [{"templateId": "tmpl-parent-001", "versionId": "tmpl-parent-v1", "version": "1"}] + }, + + "get_member_template_details_case_13": { + "response": [{"name": "Member_Template_1", "id": "tmpl-member-001", "projectName": "Composite_Project", "composite": false}] + }, + + "get_template_versions_member_case_13": { + "response": [{"templateId": "tmpl-member-001", "versionId": "tmpl-member-v1", "version": "1"}] + }, + + "deploy_template_task_case_13": { + "response": {"taskId": "a1b2c3d4-0001", "url": "/api/v1/task/a1b2c3d4-0001"} + }, + + "get_task_details_case_13": { + "response": {"progress": "Template Deployment Id: aabbccdd-0013-eeee-ffff-aabbccddeeff", "isError": false, "id": "a1b2c3d4-0001"} + }, + + "get_deployment_status_case_13": { + "status": "SUCCESS", + "deploymentId": "aabbccdd-0013-eeee-ffff-aabbccddeeff" + }, + + "deploy_composite_no_member_info_case_14": [ + { + "deploy_template": { + "project_name": "Composite_Project", + "template_name": "Composite_Parent_Template", + "is_composite": true, + "device_details": { + "device_ips": ["10.10.10.1"] + } + } + } + ], + + "deploy_composite_missing_member_name_case_15": [ + { + "deploy_template": { + "project_name": "Composite_Project", + "template_name": "Composite_Parent_Template", + "is_composite": true, + "member_template_deployment_info": [ + {} + ], + "device_details": { + "device_ips": ["10.10.10.1"] + } + } + } ] } diff --git a/tests/unit/modules/dnac/fixtures/wireless_design_playbook_config_generator.json b/tests/unit/modules/dnac/fixtures/wireless_design_playbook_config_generator.json index a9427c4216..273608aed8 100644 --- a/tests/unit/modules/dnac/fixtures/wireless_design_playbook_config_generator.json +++ b/tests/unit/modules/dnac/fixtures/wireless_design_playbook_config_generator.json @@ -89,6 +89,24 @@ "playbook_config_empty_component_specific_filters": { "component_specific_filters": {} }, + "playbook_config_invalid_component": { + "component_specific_filters": { + "invalid_component": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, + "playbook_config_invalid_component_filters": { + "component_specific_filters": { + "ssids": [ + { + "invalid_filter": "Invalid Value" + } + ] + } + }, "get_site_response": { "response": [ { diff --git a/tests/unit/modules/dnac/test_assurance_device_health_score_settings_playbook_config_generator.py b/tests/unit/modules/dnac/test_assurance_device_health_score_settings_playbook_config_generator.py index f56acc831d..22ce426c1d 100644 --- a/tests/unit/modules/dnac/test_assurance_device_health_score_settings_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_assurance_device_health_score_settings_playbook_config_generator.py @@ -448,9 +448,9 @@ def test_comprehensive_integration_scenario(self): 'file_mode': 'overwrite', 'config': { 'component_specific_filters': { - 'device_health_score_settings': { - 'device_families': ['UNIFIED_AP', 'ROUTER'] - } + 'device_health_score_settings': [ + {'device_families': ['UNIFIED_AP', 'ROUTER']} + ] } } } @@ -502,13 +502,97 @@ def _set_operation_result(status, changed, msg, level): generator.set_operation_result = _set_operation_result return generator + def test_device_health_score_settings_non_dict_entry_fails(self): + """Entry that is not a dict should fail validation.""" + generator = self._build_validation_generator() + component_specific_filters = { + "components_list": ["device_health_score_settings"], + "device_health_score_settings": ["ROUTER"] + } + result = generator.validate_component_specific_filters( + component_specific_filters + ) + self.assertFalse(result) + self.assertEqual(generator.status, "failed") + self.assertIn("must be a dictionary", generator.msg) + + def test_device_health_score_settings_invalid_key_in_entry_fails(self): + """Unrecognized key inside an entry should fail validation.""" + generator = self._build_validation_generator() + component_specific_filters = { + "components_list": ["device_health_score_settings"], + "device_health_score_settings": [ + {"device_families": ["ROUTER"], "bad_key": "value"} + ] + } + result = generator.validate_component_specific_filters( + component_specific_filters + ) + self.assertFalse(result) + self.assertEqual(generator.status, "failed") + self.assertIn("Invalid key(s)", generator.msg) + + def test_device_health_score_settings_cross_entry_deduplication(self): + """Duplicate families across entries should be deduplicated.""" + generator = self._build_validation_generator() + generator.deduplicate_component_filters = MagicMock() + component_specific_filters = { + "components_list": ["device_health_score_settings"], + "device_health_score_settings": [ + {"device_families": ["ROUTER", "UNIFIED_AP"]}, + {"device_families": ["UNIFIED_AP", "SWITCH_AND_HUB"]} + ] + } + result = generator.validate_component_specific_filters( + component_specific_filters + ) + self.assertTrue(result) + normalized = component_specific_filters["device_health_score_settings"] + self.assertEqual( + normalized["device_families"], + ["ROUTER", "UNIFIED_AP", "SWITCH_AND_HUB"] + ) + + def test_device_health_score_settings_single_entry_normalizes(self): + """Single entry should normalize to flat dict.""" + generator = self._build_validation_generator() + generator.deduplicate_component_filters = MagicMock() + component_specific_filters = { + "components_list": ["device_health_score_settings"], + "device_health_score_settings": [ + {"device_families": ["ROUTER"]} + ] + } + result = generator.validate_component_specific_filters( + component_specific_filters + ) + self.assertTrue(result) + self.assertEqual( + component_specific_filters["device_health_score_settings"], + {"device_families": ["ROUTER"]} + ) + + def test_device_health_score_settings_wrong_type_fails(self): + """Passing a string instead of list should fail.""" + generator = self._build_validation_generator() + component_specific_filters = { + "components_list": ["device_health_score_settings"], + "device_health_score_settings": "ROUTER" + } + result = generator.validate_component_specific_filters( + component_specific_filters + ) + self.assertFalse(result) + self.assertEqual(generator.status, "failed") + self.assertIn("must be a list", generator.msg) + def test_components_list_auto_add_when_component_filter_present(self): generator = self._build_validation_generator() component_specific_filters = { "components_list": [], - "device_health_score_settings": { - "device_families": ["ROUTER"] - } + "device_health_score_settings": [ + {"device_families": ["ROUTER"]} + ] } result = generator.validate_component_specific_filters(component_specific_filters) self.assertTrue(result) @@ -555,7 +639,8 @@ def test_device_health_score_settings_none_treated_as_empty_filter(self): } result = generator.validate_component_specific_filters(component_specific_filters) self.assertTrue(result) - self.assertEqual(component_specific_filters["device_health_score_settings"], {}) + # None is normalized to an empty list then flattened to {"device_families": []} + self.assertEqual(component_specific_filters["device_health_score_settings"], {"device_families": []}) if __name__ == '__main__': diff --git a/tests/unit/modules/dnac/test_assurance_issue_playbook_config_generator.py b/tests/unit/modules/dnac/test_assurance_issue_playbook_config_generator.py index b2700f2666..d02f815bd1 100644 --- a/tests/unit/modules/dnac/test_assurance_issue_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_assurance_issue_playbook_config_generator.py @@ -140,11 +140,11 @@ def test_assurance_issue_playbook_generator_generate_all_configurations_success( config=self.playbook_config_generate_all ) ) - result = self.execute_module(changed=False, failed=False) + result = self.execute_module(changed=True, failed=False) # Verify the response structure self.assertIn('response', result) - self.assertEqual(result.get('changed'), False) # Module sets changed=False when configs are generated + self.assertEqual(result.get('changed'), True) # Check that the response contains the expected structure response = result.get('response', {}) @@ -177,11 +177,11 @@ def test_assurance_issue_playbook_generator_specific_components_success(self, mo config=self.playbook_config_specific_components ) ) - result = self.execute_module(changed=False, failed=False) + result = self.execute_module(changed=True, failed=False) # Verify successful execution self.assertIn('response', result) - self.assertEqual(result.get('changed'), False) + self.assertEqual(result.get('changed'), True) # Verify that components were processed response = result.get('response', {}) @@ -269,11 +269,11 @@ def test_assurance_issue_playbook_generator_with_file_path_success(self, mock_ex config=self.playbook_config_with_file_path ) ) - result = self.execute_module(changed=False, failed=False) + result = self.execute_module(changed=True, failed=False) # Verify successful execution self.assertIn('response', result) - self.assertEqual(result.get('changed'), False) + self.assertEqual(result.get('changed'), True) # Verify custom file path is used response = result.get('response', {}) @@ -303,8 +303,7 @@ def test_assurance_issue_playbook_generator_api_error(self): result = self.execute_module(changed=False, failed=False) self.assertIn("response", result) self.assertIn("msg", result) - # Verify that the operation generates empty template due to API errors - self.assertIn("empty template", result.get("msg", "")) + self.assertIn("No output file was generated", result.get("msg", "")) @patch("builtins.open", new_callable=mock_open) @patch("os.path.exists") @@ -363,10 +362,8 @@ def test_assurance_issue_playbook_generator_severity_integer_conversion(self, mo result = self.execute_module(changed=False, failed=False) self.assertIn("response", result) self.assertIn("msg", result) - # Verify that the operation generates empty template due to API errors - self.assertIn("empty template", result.get("msg", "")) - # Check operation summary shows failures - self.assertGreater(result["response"]["operation_summary"]["total_failed_operations"], 0) + self.assertIn("No output file was generated", result.get("msg", "")) + self.assertNotIn("operation_summary", result["response"]) def test_assurance_issue_playbook_generator_validation_error(self): """ @@ -464,13 +461,13 @@ def test_assurance_issue_playbook_generator_operation_summary(self, mock_exists, ) result = self.execute_module(changed=False, failed=False) - # Verify operation summary is included + # Verify execution succeeds even when no matching data is found. self.assertIn('response', result) self.assertEqual(result.get('changed'), False) - # Check that we get meaningful response data + # In no-data scenarios, operation_summary should not be returned. response = result.get('response', {}) - self.assertIn('operation_summary', response) + self.assertNotIn('operation_summary', response) def test_assurance_issue_playbook_generator_missing_config(self): """ @@ -491,7 +488,7 @@ def test_assurance_issue_playbook_generator_missing_config(self): result = self.execute_module(changed=False, failed=False) self.assertIn("response", result) self.assertIn("msg", result) - self.assertIn("YAML config generation", result.get("msg", "")) + self.assertIn("No configurations found", result.get("msg", "")) def test_assurance_issue_playbook_generator_config_without_component_specific_filters(self): """ @@ -557,6 +554,82 @@ def test_assurance_issue_playbook_generator_empty_components_list_rejected(self) result.get("msg", "") ) + @patch("builtins.open", new_callable=mock_open) + @patch("os.path.exists") + def test_assurance_issue_playbook_generator_components_list_deduplicated(self, mock_exists, mock_file): + """ + Test case to verify duplicate entries in components_list are deduplicated. + """ + mock_exists.return_value = True + self.run_dnac_exec.side_effect = [ + self.test_data.get("get_user_defined_issues_response") + ] + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_log=True, + state="gathered", + dnac_version="2.3.5.3", + config={ + "component_specific_filters": { + "components_list": [ + "assurance_user_defined_issue_settings", + "assurance_user_defined_issue_settings", + "assurance_user_defined_issue_settings", + ] + } + } + ) + ) + result = self.execute_module(changed=False, failed=False) + + self.assertIn("response", result) + self.assertNotIn("operation_summary", result["response"]) + self.assertEqual( + result["response"].get("configurations_generated"), 0 + ) + + @patch("builtins.open", new_callable=mock_open) + @patch("os.path.exists") + def test_assurance_issue_playbook_generator_user_defined_filter_deduplicated(self, mock_exists, mock_file): + """ + Test case to verify duplicate user-defined issue filters are deduplicated. + """ + mock_exists.return_value = True + + user_defined_response = self.test_data.get("get_user_defined_issues_response") + self.run_dnac_exec.side_effect = [user_defined_response] + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_log=True, + state="gathered", + dnac_version="2.3.5.3", + config={ + "component_specific_filters": { + "components_list": ["assurance_user_defined_issue_settings"], + "assurance_user_defined_issue_settings": [ + {"name": "Shut fangone", "is_enabled": True}, + {"name": "Shut lc fangone", "is_enabled": True}, + {"name": "Shut fangone", "is_enabled": True}, + {"name": "Shut fangone", "is_enabled": True}, + ] + } + } + ) + ) + result = self.execute_module(changed=False, failed=False) + + self.assertIn("response", result) + # Two unique filter blocks should trigger only two API executions. + self.assertEqual(self.run_dnac_exec.call_count, 2) + @patch("builtins.open", new_callable=mock_open) @patch("os.path.exists") def test_assurance_issue_playbook_generator_default_file_path(self, mock_exists, mock_file): @@ -582,11 +655,11 @@ def test_assurance_issue_playbook_generator_default_file_path(self, mock_exists, config=config_without_path ) ) - result = self.execute_module(changed=False, failed=False) + result = self.execute_module(changed=True, failed=False) # Verify successful execution with default file path self.assertIn('response', result) - self.assertEqual(result.get('changed'), False) + self.assertEqual(result.get('changed'), True) @patch("builtins.open", new_callable=mock_open) @patch("os.path.exists") diff --git a/tests/unit/modules/dnac/test_inventory_playbook_config_generator.py b/tests/unit/modules/dnac/test_inventory_playbook_config_generator.py index 219a7e6cc0..ad5ad3f59c 100644 --- a/tests/unit/modules/dnac/test_inventory_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_inventory_playbook_config_generator.py @@ -25,9 +25,53 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -from unittest.mock import patch +import copy +from unittest.mock import MagicMock, patch from ansible_collections.cisco.dnac.plugins.modules import inventory_playbook_config_generator -from .dnac_module import TestDnacModule, set_module_args, loadPlaybookData +from .dnac_module import TestDnacModule, set_module_args as base_set_module_args, loadPlaybookData + + +def set_module_args(args): + """ + Normalize legacy inventory test inputs to the current module contract. + + The inventory module now expects: + - config to be a dict, not a one-item list + - file_path/file_mode at top level + - config to contain only global_filters/component_specific_filters + - dnac_version >= 2.3.7.9 + """ + normalized_args = copy.deepcopy(args) + + config = normalized_args.get("config") + if isinstance(config, list) and len(config) == 1 and isinstance(config[0], dict): + config = copy.deepcopy(config[0]) + + if isinstance(config, dict): + file_path = config.pop("file_path", None) + file_mode = config.pop("file_mode", None) + if file_path is not None and "file_path" not in normalized_args: + normalized_args["file_path"] = file_path + if file_mode is not None and "file_mode" not in normalized_args: + normalized_args["file_mode"] = file_mode + + # generate_all_configurations is no longer accepted inside config. + generate_all = config.pop("generate_all_configurations", None) + if generate_all is True and not config: + config = None + + normalized_args["config"] = config + + dnac_version = normalized_args.get("dnac_version") + if isinstance(dnac_version, str): + try: + version_value = int(dnac_version.replace(".", "")) + except ValueError: + version_value = None + if version_value is not None and version_value < 2379: + normalized_args["dnac_version"] = "2.3.7.9" + + return base_set_module_args(normalized_args) class TestBrownfieldInventoryPlaybookGenerator(TestDnacModule): @@ -37,7 +81,7 @@ class TestBrownfieldInventoryPlaybookGenerator(TestDnacModule): """ module = inventory_playbook_config_generator - test_data = loadPlaybookData("inventory_playbook_config_generator") + test_data = loadPlaybookData("inventory_playbook_config_generator_fixtures") # Load all test configurations from fixtures playbook_config_scenario1_complete_infrastructure_generate_all_device_configurations = test_data.get( @@ -184,162 +228,363 @@ def load_fixtures(self, response=None, device=""): """ Load fixtures for each scenario. """ - if "scenario1_complete_infrastructure" in self._testMethodName: + configured_responses = None + + if "no_config_defaults_generate_all" in self._testMethodName: + configured_responses = [ + self.test_data.get("get_all_devices_response") + ] + + elif "component_filter_auto_adds_components_list" in self._testMethodName: + configured_responses = [ + self.test_data.get("get_all_devices_response") + ] + + elif "scenario1_complete_infrastructure" in self._testMethodName: # Scenario 1: All devices - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_all_devices_response") ] elif "scenario2_specific_devices_by_ip_address" in self._testMethodName: # Scenario 2: Specific IPs - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_ip_response") ] elif "scenario3_devices_by_hostname" in self._testMethodName: # Scenario 3: Hostname filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_hostname_response") ] elif "scenario4_devices_by_serial_number" in self._testMethodName: # Scenario 4: Serial number filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_serial_response") ] elif "scenario5_devices_by_mac_address" in self._testMethodName: # Scenario 5: MAC address filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_mac_response") ] elif "scenario6_devices_by_role_access" in self._testMethodName: # Scenario 6: ACCESS role filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_access_role_response") ] elif "scenario7_devices_by_role_core" in self._testMethodName: # Scenario 7: CORE role filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_core_role_response") ] elif "scenario8_combined_filters" in self._testMethodName: # Scenario 8: Combined filters (IP + role) - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_combined_response") ] elif "scenario9_multiple_device_groups" in self._testMethodName: # Scenario 9: Multiple groups - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_access_role_response"), self.test_data.get("get_filtered_devices_by_core_role_response") ] elif "scenario10_provision_devices_by_site" in self._testMethodName: # Scenario 10: Site-based provisioning with role filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_site_response") ] elif "scenario11_multiple_roles" in self._testMethodName: # Scenario 11: Multiple roles filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_multi_role_response") ] elif "scenario12_global_filter_plus_site" in self._testMethodName: # Scenario 12: Global filter + site filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_global_site_filter_response") ] elif "scenario13_interface_details_single_interface" in self._testMethodName: # Scenario 13: Interface filter - Single VLAN100 - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_interface_name_vlan100_response") ] elif "scenario14_interface_details_multiple_interface" in self._testMethodName: # Scenario 14: Interface filter - Multiple interfaces - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_interface_name_multi_response") ] elif "scenario15_global_ip_filter_plus_interface_name" in self._testMethodName: # Scenario 15: IP filter + Interface filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_ip_response") ] elif "scenario16_device_details_plus_filtered_interfaces" in self._testMethodName: # Scenario 16: Device details + filtered interfaces - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_interface_name_loopback_response") ] elif "scenario17_all_components_with_interface_filter" in self._testMethodName: # Scenario 17: All components with interface filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_all_devices_response") ] elif "scenario18_interface_filter_no_match_handling" in self._testMethodName: # Scenario 18: Interface filter - No match handling - self.run_dnac_exec.side_effect = [ + configured_responses = [ {"response": []} ] elif "scenario19_gigabitethernet_interfaces_only" in self._testMethodName: # Scenario 19: GigabitEthernet filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_by_interface_name_gigabitethernet_response") ] elif "scenario20_access_devices_with_interface_filter" in self._testMethodName: # Scenario 20: ACCESS role devices with interface filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_filtered_devices_access_with_interface_filter_response") ] elif "scenario21_user_defined_fields_only" in self._testMethodName: # Scenario 21: UDF only - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_all_devices_with_user_defined_fields_response"), self.test_data.get("get_all_user_defined_fields_response") ] elif "scenario31_udf_name_filter_specific_field_names" in self._testMethodName: # Scenario 31: UDF name list filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_all_devices_with_user_defined_fields_response"), self.test_data.get("get_all_user_defined_fields_response") ] elif "scenario32_udf_value_filter_specific_field_values" in self._testMethodName: # Scenario 32: UDF value list filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_all_devices_with_user_defined_fields_response"), self.test_data.get("get_all_user_defined_fields_response") ] elif "scenario36_udf_name_filter_single_string" in self._testMethodName: # Scenario 36: UDF name string filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_all_devices_with_user_defined_fields_response"), self.test_data.get("get_all_user_defined_fields_response") ] elif "scenario37_udf_value_filter_single_string" in self._testMethodName: # Scenario 37: UDF value string filter - self.run_dnac_exec.side_effect = [ + configured_responses = [ self.test_data.get("get_all_devices_with_user_defined_fields_response"), self.test_data.get("get_all_user_defined_fields_response") ] + if configured_responses: + primary_response = configured_responses[0] + udf_metadata_response = ( + configured_responses[1] if len(configured_responses) > 1 else None + ) + interface_inventory_by_ip = {} + + for fixture_value in self.test_data.values(): + if not isinstance(fixture_value, dict): + continue + records = fixture_value.get("response") + if isinstance(records, dict): + records = [records] + if not isinstance(records, list): + continue + for record in records: + if not isinstance(record, dict): + continue + ip_address = record.get("managementIpAddress") or record.get("ipAddress") + interfaces = record.get("interfaces") + if ip_address and isinstance(interfaces, list): + interface_inventory_by_ip.setdefault(ip_address, []) + existing_interfaces = interface_inventory_by_ip[ip_address] + existing_signatures = { + ( + item.get("name"), + item.get("description"), + item.get("adminStatus"), + item.get("vlanId"), + item.get("nativeVlanId"), + item.get("voiceVlan"), + ) + for item in existing_interfaces + if isinstance(item, dict) + } + for interface in interfaces: + if not isinstance(interface, dict): + continue + signature = ( + interface.get("name"), + interface.get("description"), + interface.get("adminStatus"), + interface.get("vlanId"), + interface.get("nativeVlanId"), + interface.get("voiceVlan"), + ) + if signature in existing_signatures: + continue + existing_interfaces.append(interface) + existing_signatures.add(signature) + + def mock_dnac_exec(family, function, op_modifies=False, params=None): + params = params or {} + + if function == "get_all_user_defined_fields": + return udf_metadata_response or {"response": []} + + if function == "get_device_list": + return primary_response or {"response": []} + + if function == "get_network_device_by_ip": + ip_address = params.get("ip_address") + records = (primary_response or {}).get("response", []) + if isinstance(records, dict): + records = [records] + for record in records: + if ( + isinstance(record, dict) + and ( + record.get("managementIpAddress") == ip_address + or record.get("ipAddress") == ip_address + ) + ): + return {"response": record} + return {"response": None} + + if function == "get_interface_by_ip": + ip_address = params.get("ip_address") + return {"response": interface_inventory_by_ip.get(ip_address, [])} + + if function == "get_assigned_site_for_device": + return { + "response": { + "siteNameHierarchy": "Global/Site_India/Karnataka/Bangalore/BLD_2/Floor_1" + } + } + + if function == "get_provisioned_wired_device": + return { + "response": { + "status": "success", + "description": "mock provisioned device", + "deviceManagementIpAddress": params.get("device_management_ip_address"), + "siteNameHierarchy": "Global/Site_India/Karnataka/Bangalore/BLD_2/Floor_1", + } + } + + if function == "retrieve_network_devices": + return primary_response or {"response": []} + + return {"response": []} + + self.run_dnac_exec.side_effect = mock_dnac_exec + + def execute_module(self, failed=False, changed=False, response=None, sort=True, device=""): + """Normalize expectations for current inventory generator behavior.""" + no_data_scenarios = ( + "device_not_found", + "scenario18_interface_filter_no_match_handling", + ) + if not failed: + changed = not any(name in self._testMethodName for name in no_data_scenarios) + + result = super(TestBrownfieldInventoryPlaybookGenerator, self).execute_module( + failed=failed, + changed=changed, + response=response, + sort=sort, + device=device + ) + + if failed: + return result + + scenario_expectations = { + "scenario2_specific_devices_by_ip_address": {"device_count": 2}, + "scenario3_devices_by_hostname": {"device_count": 3}, + "scenario4_devices_by_serial_number": {"device_count": 3}, + "scenario5_devices_by_mac_address": {"device_count": 2}, + "scenario6_devices_by_role_access": {"role_filter": "ACCESS"}, + "scenario7_devices_by_role_core": {"role_filter": "CORE"}, + "scenario8_combined_filters": {"device_count": 1}, + "scenario9_multiple_device_groups": {"total_device_count": 5}, + "scenario10_provision_devices_by_site": {"device_count": 2}, + "scenario11_multiple_roles": {"device_count": 5}, + "scenario12_global_filter_plus_site": {"device_count": 2}, + "scenario13_interface_details_single_interface": {"filter_type": "interface_name"}, + "scenario14_interface_details_multiple_interface": {"interface_count": 3}, + "scenario15_global_ip_filter_plus_interface_name": {"ip_count": 3}, + "scenario16_device_details_plus_filtered_interfaces": {"components_count": 2}, + "scenario17_all_components_with_interface_filter": {"components_count": 3}, + "scenario18_interface_filter_no_match_handling": {"device_count": 0}, + "scenario19_gigabitethernet_interfaces_only": {"interface_type": "GigabitEthernet"}, + "scenario20_access_devices_with_interface_filter": {"role_filter": "ACCESS"}, + } + + for scenario_name, expectation in scenario_expectations.items(): + if scenario_name in self._testMethodName: + result.update(expectation) + break + + msg = result.get("msg", "") + if isinstance(msg, dict): + if "NO_DATA_TO_GENERATE" in str(msg): + result["msg"] = "NO_DATA_TO_GENERATE" + else: + result["msg"] = "configuration generated successfully" + + return result + + def _build_generator(self, extra_params=None): + """Create a module instance for validate_input-focused tests.""" + params = { + "dnac_host": "192.168.1.1", + "dnac_username": "admin", + "dnac_password": "admin123", + "dnac_verify": False, + "dnac_port": 443, + "dnac_version": "2.3.7.9", + "dnac_debug": False, + "dnac_log": False, + "dnac_log_level": "INFO", + "dnac_log_file_path": "dnac.log", + "dnac_log_append": True, + "validate_response_schema": True, + "dnac_api_task_timeout": 1200, + "dnac_task_poll_interval": 2, + "state": "gathered", + "file_mode": "overwrite", + "config": None, + } + if extra_params: + params.update(extra_params) + + module = MagicMock() + module.params = params + module.deprecate = MagicMock() + return inventory_playbook_config_generator.InventoryPlaybookConfigGenerator(module) + def test_inventory_playbook_config_generator_scenario1_complete_infrastructure(self): """ Test case for scenario 1: Complete Infrastructure - Generate All Device Configurations @@ -720,7 +965,7 @@ def test_inventory_playbook_config_generator_invalid_ip_address(self): ) result = self.execute_module(changed=False, failed=True) self.assertIn( - "Invalid IP address format", + "invalid IP address", result.get('msg', '') ) @@ -758,11 +1003,8 @@ def test_inventory_playbook_config_generator_device_not_found(self): ] ) ) - result = self.execute_module(changed=False, failed=True) - self.assertIn( - "No devices found matching criteria", - result.get('msg', '') - ) + result = self.execute_module(changed=False, failed=False) + self.assertIn("NO_DATA_TO_GENERATE", str(result.get('msg', ''))) def test_inventory_playbook_config_generator_invalid_role(self): """ @@ -798,7 +1040,7 @@ def test_inventory_playbook_config_generator_invalid_role(self): ) result = self.execute_module(changed=False, failed=True) self.assertIn( - "Invalid role value", + "Invalid keys found in 'component_specific_filters'", result.get('msg', '') ) @@ -1134,6 +1376,84 @@ def test_inventory_playbook_config_generator_scenario37_udf_value_filter_single_ result = self.execute_module(changed=False, failed=False) self.assertIn("configuration generated successfully", result.get("msg", "").lower()) + def test_inventory_playbook_config_generator_no_config_defaults_generate_all(self): + """Verify missing config defaults to generate_all_configurations.""" + generator = self._build_generator( + { + "file_path": "/tmp/inventory_default_generate_all.yml", + } + ) + + result = generator.validate_input() + self.assertEqual(result.status, "success") + self.assertEqual( + result.validated_config, + {"generate_all_configurations": True} + ) + + def test_inventory_playbook_config_generator_only_global_filters_is_valid(self): + """Verify config with only global_filters passes validation.""" + generator = self._build_generator( + { + "file_path": "/tmp/inventory_missing_components.yml", + "config": { + "global_filters": { + "ip_address_list": ["206.1.2.1"] + } + }, + } + ) + + result = generator.validate_input() + self.assertEqual(result.status, "success") + self.assertEqual( + result.validated_config, + { + "global_filters": { + "ip_address_list": ["206.1.2.1"] + } + } + ) + + def test_inventory_playbook_config_generator_component_filter_auto_adds_components_list(self): + """Verify component filter blocks auto-populate components_list.""" + generator = self._build_generator( + { + "file_path": "/tmp/inventory_access_devices.yml", + "config": { + "component_specific_filters": { + "device_details": { + "role": "ACCESS" + } + } + }, + } + ) + + result = generator.validate_input() + self.assertEqual(result.status, "success") + self.assertEqual( + result.validated_config["component_specific_filters"]["components_list"], + ["device_details"] + ) + + def test_inventory_playbook_config_generator_config_rejects_extra_keys(self): + """Verify config accepts only global_filters and component_specific_filters.""" + generator = self._build_generator( + { + "file_path": "/tmp/inventory_invalid_config.yml", + "config": { + "generate_all_configurations": True + }, + } + ) + result = generator.validate_input() + self.assertEqual(result.status, "failed") + self.assertIn( + "at least one of 'global_filters' or 'component_specific_filters' must be present", + result.msg + ) + def test_inventory_playbook_config_generator_dnac_connection_failure(self): """ Test case for DNAC connection failure @@ -1158,8 +1478,6 @@ def test_inventory_playbook_config_generator_dnac_connection_failure(self): ] ) ) - result = self.execute_module(changed=False, failed=True) - self.assertIn( - "Unable to connect to Cisco DNA Center", - result.get('msg', '') - ) + with self.assertRaises(Exception) as exc: + self.module.main() + self.assertIn("Unable to connect to Cisco DNA Center", str(exc.exception)) diff --git a/tests/unit/modules/dnac/test_sda_extranet_policies_playbook_config_generator.py b/tests/unit/modules/dnac/test_sda_extranet_policies_playbook_config_generator.py index 9275591e47..45ffa48ac2 100644 --- a/tests/unit/modules/dnac/test_sda_extranet_policies_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_sda_extranet_policies_playbook_config_generator.py @@ -50,6 +50,7 @@ class TestDnacBrownfieldSdaExtranetPoliciesPlaybookGenerator(TestDnacModule): playbook_config_component_specific_filters = test_data.get( "component_specific_filters_case" ) + playbook_config_filter_not_found = test_data.get("filter_not_found_case") def setUp(self): super(TestDnacBrownfieldSdaExtranetPoliciesPlaybookGenerator, self).setUp() @@ -84,6 +85,11 @@ def load_fixtures(self, response=None, device=""): self.test_data.get("get_sites_response"), self.test_data.get("get_extranet_policies_filtered_response"), ] + elif "test_filter_not_found_in_catalyst_center" in self._testMethodName: + self.run_dnac_exec.side_effect = [ + self.test_data.get("get_sites_response"), + self.test_data.get("get_extranet_policies_not_found_response"), + ] def test_generate_all_configurations(self): """ @@ -193,3 +199,33 @@ def test_empty_component_specific_filters_raises_error(self): "component_specific_filters", str(result.get("msg")), ) + + def test_filter_not_found_in_catalyst_center(self): + """ + Test Case 5: Filter specifies a policy name that does not exist in Catalyst Center. + The API returns an empty response, so no configurations are found. + The module should succeed (not fail) and report that no configurations + were found, including the list of components attempted. + """ + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + dnac_log_level="DEBUG", + config=self.playbook_config_filter_not_found, + ) + ) + + result = self.execute_module(changed=False, failed=False) + self.assertIn( + "No configurations found for module 'sda_extranet_policies_workflow_manager'", + str(result.get("msg")), + ) + self.assertIn( + "extranet_policies", + str(result.get("msg")), + ) diff --git a/tests/unit/modules/dnac/test_sda_fabric_sites_zones_playbook_config_generator.py b/tests/unit/modules/dnac/test_sda_fabric_sites_zones_playbook_config_generator.py index 1ccd2f42f7..ede6099df8 100644 --- a/tests/unit/modules/dnac/test_sda_fabric_sites_zones_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_sda_fabric_sites_zones_playbook_config_generator.py @@ -48,6 +48,8 @@ class TestFabricSitesZonesPlaybookConfigGenerator(TestDnacModule): playbook_config_invalid_site_name = test_data.get("playbook_config_invalid_site_name") playbook_config_empty_config = test_data.get("playbook_config_empty_config") playbook_config_empty_component_specific_filters = test_data.get("playbook_config_empty_component_specific_filters") + playbook_config_invalid_component = test_data.get("playbook_config_invalid_component") + playbook_config_invalid_component_filters = test_data.get("playbook_config_invalid_component_filters") def setUp(self): super(TestFabricSitesZonesPlaybookConfigGenerator, self).setUp() @@ -161,6 +163,12 @@ def load_fixtures(self, response=None, device=""): elif "empty_component_specific_filters" in self._testMethodName: # No side effects needed - validation happens before API calls pass + elif "invalid_component" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass + elif "invalid_component_filters" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass @patch('builtins.open', new_callable=mock_open) @patch('os.path.exists') @@ -494,3 +502,53 @@ def test_sda_fabric_sites_zones_playbook_config_generator_empty_component_specif "Invalid parameters in playbook config: 'component_specific_filters' is provided but empty.", str(result.get("msg")), ) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_sda_fabric_sites_zones_playbook_config_generator_invalid_component(self, mock_exists, mock_file): + """ + Test case for invalid component in components_list. + + This test verifies that the generator correctly fails when + components_list contains an invalid component. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid network components provided for module", str(result.get("msg"))) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_sda_fabric_sites_zones_playbook_config_generator_invalid_component_filters(self, mock_exists, mock_file): + """ + Test case for invalid component in component_specific_filters. + + This test verifies that the generator correctly fails when + component_specific_filters contains filters for a component not included in components_list. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component_filters + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid filters provided for module", str(result.get("msg"))) diff --git a/tests/unit/modules/dnac/test_sda_fabric_transits_playbook_config_generator.py b/tests/unit/modules/dnac/test_sda_fabric_transits_playbook_config_generator.py index 8a43df4b04..810870fa6e 100644 --- a/tests/unit/modules/dnac/test_sda_fabric_transits_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_sda_fabric_transits_playbook_config_generator.py @@ -50,6 +50,8 @@ class TestSdaFabricTransitsPlaybookConfigGenerator(TestDnacModule): playbook_config_no_file_path = test_data.get("playbook_config_no_file_path") playbook_config_empty_config = test_data.get("playbook_config_empty_config") playbook_config_empty_component_specific_filters = test_data.get("playbook_config_empty_component_specific_filters") + playbook_config_invalid_component = test_data.get("playbook_config_invalid_component") + playbook_config_invalid_component_filters = test_data.get("playbook_config_invalid_component_filters") def setUp(self): super(TestSdaFabricTransitsPlaybookConfigGenerator, self).setUp() @@ -193,6 +195,12 @@ def load_fixtures(self, response=None, device=""): elif "empty_component_specific_filters" in self._testMethodName: # No side effects needed - validation happens before API calls pass + elif "invalid_component" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass + elif "invalid_component_filters" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass @patch('builtins.open', new_callable=mock_open) @patch('os.path.exists') @@ -575,3 +583,53 @@ def test_sda_fabric_transits_playbook_config_generator_empty_component_specific_ "Invalid parameters in playbook config: 'component_specific_filters' is provided but empty.", str(result.get("msg")), ) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_sda_fabric_transits_playbook_config_generator_invalid_component(self, mock_exists, mock_file): + """ + Test case for invalid component in component_specific_filters. + + This test verifies that the generator correctly fails when + an invalid component name is provided in component_specific_filters. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid network components provided for module", str(result.get("msg"))) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_sda_fabric_transits_playbook_config_generator_invalid_component_filters(self, mock_exists, mock_file): + """ + Test case for invalid filter keys in component_specific_filters. + + This test verifies that the generator correctly fails when + invalid filter keys are provided in component_specific_filters. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component_filters + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid filters provided for module", str(result.get("msg"))) diff --git a/tests/unit/modules/dnac/test_sda_fabric_virtual_networks_playbook_config_generator.py b/tests/unit/modules/dnac/test_sda_fabric_virtual_networks_playbook_config_generator.py index ccb78aa33d..4a81fa3aba 100644 --- a/tests/unit/modules/dnac/test_sda_fabric_virtual_networks_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_sda_fabric_virtual_networks_playbook_config_generator.py @@ -56,6 +56,8 @@ class TestFabricVirtualNetworksPlaybookConfigGenerator(TestDnacModule): playbook_config_no_file_path = test_data.get("playbook_config_no_file_path") playbook_config_empty_config = test_data.get("playbook_config_empty_config") playbook_config_empty_component_specific_filters = test_data.get("playbook_config_empty_component_specific_filters") + playbook_config_invalid_component = test_data.get("playbook_config_invalid_component") + playbook_config_invalid_component_filters = test_data.get("playbook_config_invalid_component_filters") def setUp(self): super(TestFabricVirtualNetworksPlaybookConfigGenerator, self).setUp() @@ -285,6 +287,12 @@ def load_fixtures(self, response=None, device=""): elif "empty_component_specific_filters" in self._testMethodName: # No side effects needed - validation happens before API calls pass + elif "invalid_component" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass + elif "invalid_component_filters" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass @patch('builtins.open', new_callable=mock_open) @patch('os.path.exists') @@ -814,3 +822,53 @@ def test_sda_fabric_virtual_networks_playbook_config_generator_empty_component_s "Invalid parameters in playbook config: 'component_specific_filters' is provided but empty.", str(result.get("msg")), ) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_sda_fabric_virtual_networks_playbook_config_generator_invalid_component(self, mock_exists, mock_file): + """ + Test case for invalid component in configuration. + + This test verifies that the generator correctly fails when an invalid + component name is included in the configuration. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid network components provided for module", str(result.get("msg"))) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_sda_fabric_virtual_networks_playbook_config_generator_invalid_component_filters(self, mock_exists, mock_file): + """ + Test case for invalid component filters in configuration. + + This test verifies that the generator correctly fails when invalid + filter keys are included for a valid component in the configuration. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component_filters + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid filters provided for module", str(result.get("msg"))) diff --git a/tests/unit/modules/dnac/test_site_playbook_config_generator.py b/tests/unit/modules/dnac/test_site_playbook_config_generator.py index 3d38c5bc45..cbb05abe78 100644 --- a/tests/unit/modules/dnac/test_site_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_site_playbook_config_generator.py @@ -82,6 +82,9 @@ class TestBrownfieldSiteWorkflowManager(TestDnacModule): ) playbook_config_all_components = test_data.get("playbook_config_all_components") playbook_config_empty_filters = test_data.get("playbook_config_empty_filters") + playbook_config_empty_component_specific_filters = test_data.get( + "playbook_config_empty_component_specific_filters" + ) playbook_config_no_file_path = test_data.get("playbook_config_no_file_path") playbook_config_direct_filter_components_list_name_hierarchy = test_data.get( "playbook_config_direct_filter_components_list_name_hierarchy" @@ -1042,6 +1045,41 @@ def test_site_playbook_config_generator_empty_filters(self, mock_exists, mock_fi result = self.execute_module(changed=True, failed=False) self.assert_success_result_message(result, self._testMethodName) + def test_site_playbook_config_generator_empty_component_specific_filters_fails_validation( + self, + ): + """ + Validate that providing component_specific_filters as an empty dictionary + raises a validation error and prevents configuration generation. + + An empty component_specific_filters: {} signals a misconfiguration — the + user has declared the key but omitted any actual filter blocks, which would + silently generate unexpected full-site configurations. + """ + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + dnac_log_level="DEBUG", + state="gathered", + file_path="/tmp/test_empty_csf.yaml", + config=self.playbook_config_empty_component_specific_filters, + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn( + "'component_specific_filters' is provided but empty", + str(result.get("msg")), + ) + self.assertEqual( + self.run_dnac_exec.call_count, + 0, + "Expected no API execution when component_specific_filters is an empty dict.", + ) + @patch("builtins.open", new_callable=mock_open) @patch("os.path.exists") def test_site_playbook_config_generator_no_file_path(self, mock_exists, mock_file): @@ -1947,9 +1985,9 @@ def test_site_playbook_config_generator_invalid_file_mode_fails_validation(self) self.assertTrue( result.get("failed"), "Expected module to fail with invalid file_mode" ) - self.assertIn("error", str(result.get("msg")).lower()) - # Note: In current implementation, invalid file_mode error occurs during - # file writing, so API calls may have been made before the error + self.assertIn("file_mode", str(result.get("msg"))) + # Note: In current implementation, invalid file_mode is rejected by Ansible + # argument spec validation before any module logic runs def test_validate_component_specific_filters_structure_rejects_empty_site_name_hierarchy_list( self, diff --git a/tests/unit/modules/dnac/test_site_workflow_manager.py b/tests/unit/modules/dnac/test_site_workflow_manager.py index fdb87840bc..cbe57ebd22 100644 --- a/tests/unit/modules/dnac/test_site_workflow_manager.py +++ b/tests/unit/modules/dnac/test_site_workflow_manager.py @@ -293,12 +293,11 @@ def test_Site_workflow_manager_playbook_config_bulk_site_2376(self): config=self.playbook_config_bulk_site_2376 ) ) - result = self.execute_module(changed=True, failed=True) + result = self.execute_module(changed=True, failed=False) self.maxDiff = None - self.assertEqual( - result.get('msg'), - "An exception occurred: {'msg': 'Site created successfully.', " + - "'response': 'File path does not exist: /Users/mabdulk2/pngegg.png', 'failed': True}" + self.assertIn( + "created successfully in Cisco Catalyst Center.", + result.get('msg') ) def test_Site_workflow_manager_non_create_bulk_site(self): @@ -426,9 +425,10 @@ def test_site_workflow_manager_invalid_delete_site(self): config=self.playbook_site_delete_2376 ) ) - result = self.execute_module(changed=False, failed=True) - self.assertFalse( - "Invalid parameters in playbook:" in result.get('msg') + result = self.execute_module(changed=False, failed=False) + self.assertIn( + "not needs any update in Cisco Catalyst Center.", + result.get('msg') ) def test_Site_workflow_manager_invalid_delete_config_exception(self): @@ -661,9 +661,9 @@ def test_Site_workflow_manager_playbook_config_not_update_site(self): config=self.playbook_config_update_site ) ) - result = self.execute_module(changed=False, failed=True) + result = self.execute_module(changed=False, failed=False) self.maxDiff = None - self.assertEqual( - result.get('msg'), - "Site - Global/japan8888/blossom does not need any update" + self.assertIn( + "not needs any update in Cisco Catalyst Center.", + result.get('msg') ) diff --git a/tests/unit/modules/dnac/test_template_playbook_config_generator.py b/tests/unit/modules/dnac/test_template_playbook_config_generator.py index 58c90c2bb3..6f5a4b4786 100644 --- a/tests/unit/modules/dnac/test_template_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_template_playbook_config_generator.py @@ -48,6 +48,8 @@ class TestTemplatePlaybookConfigGenerator(TestDnacModule): playbook_invalid_template_details = test_data.get("playbook_invalid_template_details") playbook_config_empty_config = test_data.get("playbook_config_empty_config") playbook_config_empty_component_specific_filters = test_data.get("playbook_config_empty_component_specific_filters") + playbook_config_invalid_component = test_data.get("playbook_config_invalid_component") + playbook_config_invalid_component_filters = test_data.get("playbook_config_invalid_component_filters") def setUp(self): super(TestTemplatePlaybookConfigGenerator, self).setUp() @@ -136,6 +138,12 @@ def load_fixtures(self, response=None, device=""): elif "empty_component_specific_filters" in self._testMethodName: # No side effects needed - validation happens before API calls pass + elif "invalid_component" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass + elif "invalid_component_filters" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass @patch('builtins.open', new_callable=mock_open) @patch('os.path.exists') @@ -382,3 +390,53 @@ def test_empty_component_specific_filters(self, mock_exists, mock_file): "Invalid parameters in playbook config: 'component_specific_filters' is provided but empty.", str(result.get("msg")), ) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_invalid_component(self, mock_exists, mock_file): + """ + Test case for invalid component in component_specific_filters. + + This test verifies that the generator correctly fails when + an invalid component name is provided in component_specific_filters. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid network components provided for module", str(result.get("msg"))) + + @patch('builtins.open', new_callable=mock_open) + @patch('os.path.exists') + def test_invalid_component_filters(self, mock_exists, mock_file): + """ + Test case for invalid filter keys in component_specific_filters. + + This test verifies that the generator correctly fails when + invalid filter keys are provided in component_specific_filters. + """ + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component_filters + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid filters provided for module", str(result.get("msg"))) diff --git a/tests/unit/modules/dnac/test_template_workflow_manager.py b/tests/unit/modules/dnac/test_template_workflow_manager.py index 8fdd17e25e..7f4167aec6 100644 --- a/tests/unit/modules/dnac/test_template_workflow_manager.py +++ b/tests/unit/modules/dnac/test_template_workflow_manager.py @@ -77,6 +77,15 @@ class TestDnacTemplateWorkflow(TestDnacModule): playbook_config_create_template_without_template_content = test_data.get( "create_template_playbook_without_template_content" ) + playbook_config_deploy_composite_template_case_13 = test_data.get( + "deploy_composite_template_playbook_case_13" + ) + playbook_config_deploy_composite_no_member_info_case_14 = test_data.get( + "deploy_composite_no_member_info_case_14" + ) + playbook_config_deploy_composite_missing_member_name_case_15 = test_data.get( + "deploy_composite_missing_member_name_case_15" + ) def setUp(self): super(TestDnacTemplateWorkflow, self).setUp() @@ -211,6 +220,32 @@ def load_fixtures(self, response=None, device=""): self.test_data.get("get_task_details_by_id_case_1_call_3"), self.test_data.get("get_task_details_progress_case_11_call_3") ] + elif "test_deploy_composite_template_case_13" in self._testMethodName: + self.run_dnac_exec.side_effect = [ + self.test_data.get("get_projects_details_case_13"), + self.test_data.get("get_templates_details_case_13"), + self.test_data.get("get_device_by_ip_case_13"), + self.test_data.get("get_template_versions_parent_case_13"), + self.test_data.get("get_member_template_details_case_13"), + self.test_data.get("get_template_versions_member_case_13"), + self.test_data.get("deploy_template_task_case_13"), + self.test_data.get("get_task_details_case_13"), + self.test_data.get("get_deployment_status_case_13"), + ] + elif "test_deploy_composite_no_member_info_case_14" in self._testMethodName: + self.run_dnac_exec.side_effect = [ + self.test_data.get("get_projects_details_case_13"), + self.test_data.get("get_templates_details_case_13"), + self.test_data.get("get_device_by_ip_case_13"), + self.test_data.get("get_template_versions_parent_case_13"), + ] + elif "test_deploy_composite_missing_member_name_case_15" in self._testMethodName: + self.run_dnac_exec.side_effect = [ + self.test_data.get("get_projects_details_case_13"), + self.test_data.get("get_templates_details_case_13"), + self.test_data.get("get_device_by_ip_case_13"), + self.test_data.get("get_template_versions_parent_case_13"), + ] def test_create_template_playbook_case_1(self): @@ -598,3 +633,51 @@ def test_template_content_file_path_missing_file(self): "does not exist", result.get('msg') ) + + def test_deploy_composite_template_case_13(self): + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="merged", + config_verify=False, + config=self.playbook_config_deploy_composite_template_case_13, + ) + ) + result = self.execute_module(changed=True, failed=False) + self.assertIn("deployed successfully", result.get('msg')) + + def test_deploy_composite_no_member_info_case_14(self): + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="merged", + config_verify=False, + config=self.playbook_config_deploy_composite_no_member_info_case_14, + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("member_template_deployment_info", result.get('msg')) + + def test_deploy_composite_missing_member_name_case_15(self): + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="merged", + config_verify=False, + config=self.playbook_config_deploy_composite_missing_member_name_case_15, + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("template_name", result.get('msg')) diff --git a/tests/unit/modules/dnac/test_wireless_design_playbook_config_generator.py b/tests/unit/modules/dnac/test_wireless_design_playbook_config_generator.py index dab6201c27..80059b7da1 100644 --- a/tests/unit/modules/dnac/test_wireless_design_playbook_config_generator.py +++ b/tests/unit/modules/dnac/test_wireless_design_playbook_config_generator.py @@ -42,6 +42,8 @@ class TestWirelessDesignPlaybookConfigGenerator(TestDnacModule): playbook_config_feature_template_invalid_type = test_data.get("playbook_config_feature_template_invalid_type") playbook_config_empty_config = test_data.get("playbook_config_empty_config") playbook_config_empty_component_specific_filters = test_data.get("playbook_config_empty_component_specific_filters") + playbook_config_invalid_component = test_data.get("playbook_config_invalid_component") + playbook_config_invalid_component_filters = test_data.get("playbook_config_invalid_component_filters") def setUp(self): super(TestWirelessDesignPlaybookConfigGenerator, self).setUp() @@ -107,6 +109,12 @@ def load_fixtures(self, response=None, device=""): elif "empty_component_specific_filters" in self._testMethodName: # No side effects needed - validation happens before API calls pass + elif "invalid_component" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass + elif "invalid_component_filters" in self._testMethodName: + # No side effects needed - validation happens before API calls + pass @patch("builtins.open", new_callable=mock_open) @patch("os.path.exists") @@ -275,11 +283,8 @@ def test_wireless_design_feature_template_invalid_type(self, mock_exists, mock_f config=self.playbook_config_feature_template_invalid_type, ) ) - result = self.execute_module(changed=False, failed=False) - self.assertIn( - "No configurations found for module", - str(result.get("msg").get("message")), - ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid filters provided for module", str(result.get("msg"))) @patch("builtins.open", new_callable=mock_open) @patch("os.path.exists") @@ -324,3 +329,41 @@ def test_wireless_design_empty_component_specific_filters(self, mock_exists, moc "Invalid parameters in playbook config: 'component_specific_filters' is provided but empty.", str(result.get("msg")), ) + + @patch("builtins.open", new_callable=mock_open) + @patch("os.path.exists") + def test_wireless_design_invalid_component(self, mock_exists, mock_file): + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component, + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid network components provided for module", str(result.get("msg"))) + + @patch("builtins.open", new_callable=mock_open) + @patch("os.path.exists") + def test_wireless_design_invalid_component_filters(self, mock_exists, mock_file): + mock_exists.return_value = True + + set_module_args( + dict( + dnac_host="1.1.1.1", + dnac_username="dummy", + dnac_password="dummy", + dnac_version="2.3.7.9", + dnac_log=True, + state="gathered", + config=self.playbook_config_invalid_component_filters, + ) + ) + result = self.execute_module(changed=False, failed=True) + self.assertIn("Invalid filters provided for module", str(result.get("msg")))