diff --git a/src/azure-cli-core/azure/cli/core/commands/__init__.py b/src/azure-cli-core/azure/cli/core/commands/__init__.py
index fb2a9a3dece..3f251fc5213 100644
--- a/src/azure-cli-core/azure/cli/core/commands/__init__.py
+++ b/src/azure-cli-core/azure/cli/core/commands/__init__.py
@@ -506,6 +506,7 @@ class AzCliCommandInvoker(CommandInvoker):
 
     # pylint: disable=too-many-statements,too-many-locals,too-many-branches
     def execute(self, args):
+        args_copy = args[:]
         from knack.events import (EVENT_INVOKER_PRE_CMD_TBL_CREATE, EVENT_INVOKER_POST_CMD_TBL_CREATE,
                                   EVENT_INVOKER_CMD_TBL_LOADED, EVENT_INVOKER_PRE_PARSE_ARGS,
                                   EVENT_INVOKER_POST_PARSE_ARGS,
@@ -586,7 +587,12 @@ def execute(self, args):
                 args[0] = '--help'
 
         self.parser.enable_autocomplete()
-
+        if '--what-if' in args_copy:
+            return self._what_if(args_copy)
+        if '--export-bicep' in args_copy:
+            # --export-bicep must be used together with --what-if
+            logger.error("The --export-bicep parameter must be used together with --what-if")
+            return CommandResultItem(None, exit_code=1, error=CLIError('The --export-bicep parameter must be used together with --what-if'))
         self.cli_ctx.raise_event(EVENT_INVOKER_PRE_PARSE_ARGS, args=args)
         parsed_args = self.parser.parse_args(args)
         self.cli_ctx.raise_event(EVENT_INVOKER_POST_PARSE_ARGS, command=parsed_args.command, args=parsed_args)
@@ -691,6 +697,198 @@ def execute(self, args):
                 table_transformer=self.commands_loader.command_table[parsed_args.command].table_transformer,
                 is_query_active=self.data['query_active'])
 
+    def _what_if(self, args):
+        logger.debug("_what_if called with command: %s", args)
+        if '--what-if' in args:
+            logger.debug("Entering what-if mode")
+
+            # Remove both --what-if and --export-bicep from args for processing
+            clean_args = [arg for arg in args if arg not in ['--what-if', '--export-bicep']]
+            command_parts = [arg for arg in clean_args if not arg.startswith('-') and arg != 'az']
+            command_name = ' '.join(command_parts) if command_parts else 'unknown'
+            safe_params = AzCliCommandInvoker._extract_parameter_names(args)
+
+            # Set command details first so telemetry knows which command was attempted
+            telemetry.set_command_details(
+                command_name + ' --what-if',
+                self.data.get('output', 'json'),
+                safe_params
+            )
+
+            # Check whether the command is in the what-if whitelist
+            if not self._is_command_supported_for_what_if(args):
+                error_msg = 'The "--what-if" argument is not supported for this command.'
+                logger.error(error_msg)
+                telemetry.set_what_if_summary('what-if-unsupported-command')
+                telemetry.set_user_fault(summary='what-if-unsupported-command')
+                return CommandResultItem(None, exit_code=1, error=CLIError(error_msg))
+
+            from azure.cli.core.what_if import show_what_if
+
+            # Check if --export-bicep is present
+            export_bicep = '--export-bicep' in args
+
+            try:
+                if export_bicep:
+                    logger.debug("Export bicep mode enabled")
+
+                # Resolve the subscription ID: an explicit --subscription value wins,
+                # otherwise fall back to the current login subscription
+                from azure.cli.core.commands.client_factory import get_subscription_id
+                subscription_id = get_subscription_id(self.cli_ctx)
+                if '--subscription' in clean_args:
+                    index = clean_args.index('--subscription')
+                    if index + 1 < len(clean_args):
+                        subscription_id = clean_args[index + 1]
+                else:
+                    logger.debug("Using current login subscription ID: %s", subscription_id)
+
+                clean_args = ["az"] + clean_args if clean_args[0] != 'az' else clean_args
+                command = " ".join(clean_args)
+                what_if_result = show_what_if(self.cli_ctx, command,
subscription_id=subscription_id, export_bicep=export_bicep) + + # Save bicep templates if export_bicep is enabled and bicep_template exists + bicep_files = [] + if export_bicep and isinstance(what_if_result, dict) and 'bicep_template' in what_if_result: + bicep_files = self._save_bicep_templates(clean_args, what_if_result['bicep_template']) + what_if_result.pop('bicep_template', None) + + # Print bicep file locations if any were saved + if bicep_files: + from azure.cli.core.style import Style, print_styled_text + print_styled_text((Style.WARNING, "\nBicep templates saved to:")) + for file_path in bicep_files: + print_styled_text((Style.WARNING, f" {file_path}")) + print("") + + # Ensure output format is set for proper formatting + # Default to 'json' if not already set + if 'output' not in self.cli_ctx.invocation.data or self.cli_ctx.invocation.data['output'] is None: + self.cli_ctx.invocation.data['output'] = 'json' + + telemetry.set_what_if_summary('what-if-completed') + telemetry.set_success(summary='what-if-completed') + + # Return the formatted what-if output as the result + # Similar to the normal flow in execute() method + return CommandResultItem( + what_if_result, + table_transformer=None, + is_query_active=self.data.get('query_active', False), + exit_code=0 + ) + except (CLIError, ValueError, KeyError) as ex: + # If what-if service fails, still show an informative message + logger.error("What-if preview failed: %s", str(ex)) + telemetry.set_what_if_summary('what-if-failed') + telemetry.set_what_if_exception(ex) + telemetry.set_exception(ex, fault_type='what-if-error') + telemetry.set_failure(summary='what-if-failed') + return CommandResultItem(None, exit_code=1, + error=CLIError(f'What-if preview failed: {str(ex)}')) + + def _is_command_supported_for_what_if(self, args): + """Check if the command is in the what-if whitelist + + Args: + args: List of command arguments + + Returns: + bool: True if command is supported, False otherwise + """ + # Define supported commands for what-if functionality + WHAT_IF_SUPPORTED_COMMANDS = { + 'vm create', + 'vm update', + 'storage account create', + 'storage container create', + 'storage share create', + 'network vnet create', + 'network vnet update', + 'storage account network-rule add', + 'vm disk attach', + 'vm disk detach', + 'vm nic remove', + 'sql server create', + 'sql server update', + } + + # Extract command parts (skip 'az' and flags) + command_parts = [] + for arg in args: + if arg == 'az': + continue + if arg.startswith('-'): + break + command_parts.append(arg) + + # Join command parts to form the command string + if command_parts: + command = ' '.join(command_parts) + logger.debug("Checking what-if support for command: %s", command) + return command in WHAT_IF_SUPPORTED_COMMANDS + + return False + + def _save_bicep_templates(self, args, bicep_template): + """Save bicep templates to user's .azure directory + Returns a list of saved file paths + """ + saved_files = [] + try: + import os + from datetime import datetime + from azure.cli.core._environment import get_config_dir + + # Extract command name (first argument after 'az') + command_parts = [arg for arg in args if not arg.startswith('-') and arg != 'az'] + if not command_parts: + logger.warning("Could not determine command name for bicep file naming") + return saved_files + + first_command = command_parts[0] + az_command = f"az_{first_command}" + + # Get full command for file naming (e.g., az_vm_create) + if len(command_parts) > 1: + full_command = 
f"az_{command_parts[0]}_{command_parts[1]}" + else: + full_command = az_command + "_command" + + # Create timestamp in yyyymmddhhMMss format + timestamp = datetime.now().strftime("%Y%m%d%H%M%S") + + # Get .azure config directory + config_dir = get_config_dir() + whatif_dir = os.path.join(config_dir, 'whatif', az_command) + + # Create directories if they don't exist + os.makedirs(whatif_dir, exist_ok=True) + logger.debug("Created bicep template directory: %s", whatif_dir) + + # Save main template + if 'main_template' in bicep_template: + main_file = os.path.join(whatif_dir, f"{full_command}_main_{timestamp}.bicep") + with open(main_file, 'w', encoding='utf-8') as f: + f.write(bicep_template['main_template']) + logger.debug("Bicep main template saved to: %s", main_file) + saved_files.append(main_file) + + # Save module templates if they exist + if 'module_templates' in bicep_template and bicep_template['module_templates']: + for i, module_template in enumerate(bicep_template['module_templates'], 1): + module_suffix = f"module{i}" if i > 1 else "module" + module_file = os.path.join(whatif_dir, f"{full_command}_{module_suffix}_{timestamp}.bicep") + with open(module_file, 'w', encoding='utf-8') as f: + f.write(module_template) + logger.debug("Bicep module template saved to: %s", module_file) + saved_files.append(module_file) + + except Exception as ex: + logger.warning("Failed to save bicep templates: %s", str(ex)) + + return saved_files + @staticmethod def _extract_parameter_names(args): # note: name start with more than 2 '-' will be treated as value e.g. certs in PEM format diff --git a/src/azure-cli-core/azure/cli/core/commands/parameters.py b/src/azure-cli-core/azure/cli/core/commands/parameters.py index c098d1a42a1..bbdf2f83b90 100644 --- a/src/azure-cli-core/azure/cli/core/commands/parameters.py +++ b/src/azure-cli-core/azure/cli/core/commands/parameters.py @@ -268,6 +268,26 @@ def get_location_type(cli_ctx): return location_type +def get_what_if_type(): + what_if_type = CLIArgumentType( + options_list=['--what-if'], + help="Preview the changes that will be made without actually executing the command. " + "This will call the what-if service to compare the current state with the expected state after execution.", + is_preview=True + ) + return what_if_type + + +def get_export_bicep_type(): + export_bicep_type = CLIArgumentType( + options_list=['--export-bicep'], + help="Export the Bicep template corresponding to the what-if analysis. 
" + "This parameter must be used together with --what-if.", + is_preview=True + ) + return export_bicep_type + + deployment_name_type = CLIArgumentType( help=argparse.SUPPRESS, required=False, diff --git a/src/azure-cli-core/azure/cli/core/telemetry.py b/src/azure-cli-core/azure/cli/core/telemetry.py index 2388002f532..44d4b4f525a 100644 --- a/src/azure-cli-core/azure/cli/core/telemetry.py +++ b/src/azure-cli-core/azure/cli/core/telemetry.py @@ -78,6 +78,9 @@ def __init__(self, correlation_id=None, application=None): self.enable_broker_on_windows = None self.msal_telemetry = None self.login_experience_v2 = None + # what-if specific telemetry + self.what_if_summary = None + self.what_if_exception = None def add_event(self, name, properties): for key in self.instrumentation_key: @@ -234,6 +237,9 @@ def _get_azure_cli_properties(self): set_custom_properties(result, 'EnableBrokerOnWindows', str(self.enable_broker_on_windows)) set_custom_properties(result, 'MsalTelemetry', self.msal_telemetry) set_custom_properties(result, 'LoginExperienceV2', str(self.login_experience_v2)) + # what-if related + set_custom_properties(result, 'WhatIfSummary', self.what_if_summary) + set_custom_properties(result, 'WhatIfException', self.what_if_exception) return result @@ -486,6 +492,18 @@ def set_msal_telemetry(msal_telemetry): @decorators.suppress_all_exceptions() def set_login_experience_v2(login_experience_v2): _session.login_experience_v2 = login_experience_v2 + + +@decorators.suppress_all_exceptions() +def set_what_if_summary(summary): + _session.what_if_summary = summary + + +@decorators.suppress_all_exceptions() +def set_what_if_exception(exception): + # Store exception type and message, limit length to avoid huge payloads + exception_info = f"{exception.__class__.__name__}: {str(exception)[:512]}" + _session.what_if_exception = exception_info # endregion diff --git a/src/azure-cli-core/azure/cli/core/what_if.py b/src/azure-cli-core/azure/cli/core/what_if.py new file mode 100644 index 00000000000..4f56e185edf --- /dev/null +++ b/src/azure-cli-core/azure/cli/core/what_if.py @@ -0,0 +1,258 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import threading +import time +import sys +import json +from requests import Request, Session +from knack.log import get_logger +from knack.util import CLIError + +logger = get_logger(__name__) + +def read_script_file(script_path): + try: + with open(script_path, 'r', encoding='utf-8') as f: + return f.read() + except FileNotFoundError: + raise CLIError(f"Script file not found: {script_path}") + except Exception as ex: + raise CLIError(f"Error reading script file: {ex}") + + +def _get_auth_headers(cli_ctx, subscription_id): + from azure.cli.core._profile import Profile + + resource = cli_ctx.cloud.endpoints.active_directory_resource_id + profile = Profile(cli_ctx=cli_ctx) + + try: + token_result = profile.get_raw_token(resource, subscription=subscription_id) + token_info, _, _ = token_result + token_type, token, _ = token_info + except Exception as token_ex: + raise CLIError(f"Failed to get authentication token: {token_ex}") + + return { + 'Authorization': f'{token_type} {token}', + 'Content-Type': 'application/json' + } + + +def _make_what_if_request(payload, headers_dict, cli_ctx=None): + request_completed = threading.Event() + + def _rotating_progress(): + """Simulate a rotating progress indicator.""" + spinner_chars = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"] + fallback_chars = ["|", "/", "-", "\\"] + + try: + "⠋".encode(sys.stderr.encoding or 'utf-8') + chars = spinner_chars + except (UnicodeEncodeError, UnicodeDecodeError, LookupError): + chars = fallback_chars + + use_color = cli_ctx and getattr(cli_ctx, 'enable_color', False) + if use_color: + try: + CYAN = '\033[36m' + GREEN = '\033[32m' + YELLOW = '\033[33m' + BLUE = '\033[34m' + RESET = '\033[0m' + BOLD = '\033[1m' + except (UnicodeError, AttributeError): + use_color = False + + if not use_color: + CYAN = GREEN = YELLOW = BLUE = RESET = BOLD = '' + + idx = 0 + start_time = time.time() + + # Simulate different stages, can be improved with real stages if available + while not request_completed.is_set(): + elapsed = time.time() - start_time + if elapsed < 10: + status = f"{CYAN}Connecting to what-if service{RESET}" + spinner_color = CYAN + elif elapsed < 30: + status = f"{BLUE}Analyzing Azure CLI script{RESET}" + spinner_color = BLUE + elif elapsed < 60: + status = f"{YELLOW}Processing what-if analysis{RESET}" + spinner_color = YELLOW + else: + status = f"{GREEN}Finalizing results{RESET}" + spinner_color = GREEN + elapsed_str = f"{BOLD}({elapsed:.0f}s){RESET}" + spinner = f"{spinner_color}{chars[idx % len(chars)]}{RESET}" + progress_line = f"{spinner} {status}... 
{elapsed_str}" + sys.stderr.write(f"\033[2K\r{progress_line}") + sys.stderr.flush() + idx += 1 + time.sleep(0.12) + sys.stderr.write("\033[2K\r") + sys.stderr.flush() + + try: + function_app_url = "https://azcli-script-insight.azurewebsites.net" + + progress_thread = threading.Thread(target=_rotating_progress) + progress_thread.daemon = True + progress_thread.start() + + session = Session() + logger.debug("url: %s/api/what_if_cli_preview; payload: %s", function_app_url, payload) + req = Request(method="POST", url=f"{function_app_url}/api/what_if_cli_preview", + headers=headers_dict, data=json.dumps(payload)) + prepared = session.prepare_request(req) + response = session.send(prepared) + logger.debug("response: %s", response) + request_completed.set() + progress_thread.join(timeout=0.5) + + return response + + except Exception as ex: + request_completed.set() + if 'progress_thread' in locals(): + progress_thread.join(timeout=0.5) + raise CLIError(f"Failed to connect to the what-if service: {ex}") + + +def convert_json_to_what_if_result(what_if_json_result): + from azure.cli.command_modules.resource._formatters import _change_type_to_weight, _property_change_type_to_weight + from collections import namedtuple + + enum_keys = list(_change_type_to_weight.keys()) + enum_mapping = {} + for enum_obj in enum_keys: + str_repr = str(enum_obj).lower() + if 'create' in str_repr: + enum_mapping['Create'] = enum_obj + elif 'delete' in str_repr: + enum_mapping['Delete'] = enum_obj + elif 'modify' in str_repr: + enum_mapping['Modify'] = enum_obj + elif 'deploy' in str_repr: + enum_mapping['Deploy'] = enum_obj + elif 'no_change' in str_repr or 'nochange' in str_repr: + enum_mapping['NoChange'] = enum_obj + elif 'ignore' in str_repr: + enum_mapping['Ignore'] = enum_obj + elif 'unsupported' in str_repr: + enum_mapping['Unsupported'] = enum_obj + elif 'no_effect' in str_repr or 'noeffect' in str_repr: + enum_mapping['NoEffect'] = enum_obj + + property_enum_keys = list(_property_change_type_to_weight.keys()) + property_enum_mapping = {} + for enum_obj in property_enum_keys: + str_repr = str(enum_obj).lower() + if 'create' in str_repr: + property_enum_mapping['Create'] = enum_obj + elif 'delete' in str_repr: + property_enum_mapping['Delete'] = enum_obj + elif 'modify' in str_repr: + property_enum_mapping['Modify'] = enum_obj + elif 'array' in str_repr: + property_enum_mapping['Array'] = enum_obj + elif 'no_effect' in str_repr or 'noeffect' in str_repr: + property_enum_mapping['NoEffect'] = enum_obj + + WhatIfOperationResult = namedtuple('WhatIfOperationResult', ['changes', 'potential_changes', 'diagnostics']) + ResourceChange = namedtuple('ResourceChange', ['change_type', 'resource_id', 'before', 'after', 'delta']) + PropertyChange = namedtuple('PropertyChange', ['property_change_type', 'path', 'before', 'after', 'children']) + + def _map_change_type_string(change_type_str): + return enum_mapping.get(change_type_str) + + def _map_property_change_type_string(property_change_type_str): + return property_enum_mapping.get(property_change_type_str) + + def _create_property_change(change_data): + property_change_type = _map_property_change_type_string( + change_data.get('propertyChangeType', 'NoEffect')) + path = change_data.get('path', '') + before = change_data.get('before') + after = change_data.get('after') + + children = [] + children_data = change_data.get('children', []) + for child_data in children_data: + children.append(_create_property_change(child_data)) + + return PropertyChange(property_change_type, 
path, before, after, children) + + def _create_resource_change(change_data): + change_type = _map_change_type_string(change_data.get('changeType', 'Unknown')) + resource_id = change_data.get('resourceId', '') + before = change_data.get('before') + after = change_data.get('after') + + delta = [] + delta_data = change_data.get('delta', []) + for property_data in delta_data: + delta.append(_create_property_change(property_data)) + + return ResourceChange(change_type, resource_id, before, after, delta) + + changes = [] + for change_data in what_if_json_result.get('changes', []): + changes.append(_create_resource_change(change_data)) + + potential_changes = [] + for change_data in what_if_json_result.get('potential_changes', []): + potential_changes.append(_create_resource_change(change_data)) + + return WhatIfOperationResult(changes, potential_changes, []) + + +def show_what_if(cli_ctx, azcli_script: str, subscription_id: str = None, no_pretty_print=False, export_bicep=False): + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.cli.command_modules.resource._formatters import format_what_if_operation_result + + if not subscription_id: + subscription_id = get_subscription_id(cli_ctx) + + payload = { + "azcli_script": azcli_script, + "export_bicep": export_bicep, + "subscription_id": subscription_id + } + + headers_dict = _get_auth_headers(cli_ctx, subscription_id) + response = _make_what_if_request(payload, headers_dict, cli_ctx) + + try: + raw_results = response.json() + # Only print raw results in debug mode + logger.debug("Raw what-if service response: %s", raw_results) + except ValueError as ex: + raise CLIError(f"Failed to parse response from what-if service: {ex}, raw response: {response.text}") + + success = raw_results.get('success') + if success is False: + raise CLIError(f"Errors from what-if service: {raw_results}") + if success is True: + what_if_result = raw_results.get('what_if_result', {}) + what_if_operation_result = convert_json_to_what_if_result(what_if_result) + + # If export_bicep is enabled and bicep_template exists, include it in the result + result_data = what_if_result.copy() + if export_bicep and 'bicep_template' in raw_results: + result_data['bicep_template'] = raw_results['bicep_template'] + logger.debug("Bicep template included in result: %s", raw_results['bicep_template']) + + if no_pretty_print: + return result_data + + print(format_what_if_operation_result(what_if_operation_result, cli_ctx.enable_color)) + + return result_data + raise CLIError(f"Unexpected response from what-if service, got: {raw_results}") diff --git a/src/azure-cli-telemetry/azure/cli/telemetry/__init__.py b/src/azure-cli-telemetry/azure/cli/telemetry/__init__.py index 628b863d3d6..b38bd7e8515 100644 --- a/src/azure-cli-telemetry/azure/cli/telemetry/__init__.py +++ b/src/azure-cli-telemetry/azure/cli/telemetry/__init__.py @@ -58,6 +58,7 @@ def save(config_dir, payload): events = json.loads(payload) logger.info('Begin splitting cli events and extra events, total events: %s', len(events)) + logger.debug('events: %s', events) cli_events = {} client = CliTelemetryClient() for key, event in events.items(): diff --git a/src/azure-cli/azure/cli/command_modules/network/_params.py b/src/azure-cli/azure/cli/command_modules/network/_params.py index 8dd89d8c0c2..daf9a6f4bc2 100644 --- a/src/azure-cli/azure/cli/command_modules/network/_params.py +++ b/src/azure-cli/azure/cli/command_modules/network/_params.py @@ -10,7 +10,8 @@ from azure.cli.core.commands.parameters import 
(get_location_type, get_resource_name_completion_list, tags_type, zone_type, zones_type, - file_type, get_three_state_flag, get_enum_type) + file_type, get_three_state_flag, get_enum_type, + get_what_if_type, get_export_bicep_type) from azure.cli.core.commands.validators import get_default_location_from_resource_group from azure.cli.core.commands.template_create import get_folded_parameter_help_string from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL @@ -693,6 +694,8 @@ def load_arguments(self, _): c.argument('vnet_name', virtual_network_name_type, options_list=['--name', '-n'], completer=None, local_context_attribute=LocalContextAttribute(name='vnet_name', actions=[LocalContextAction.SET], scopes=[ALL])) c.argument('edge_zone', edge_zone) + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('network vnet create', arg_group='Subnet') as c: c.argument('subnet_name', help='Name of a new subnet to create within the VNet.', @@ -703,6 +706,8 @@ def load_arguments(self, _): with self.argument_context('network vnet update') as c: c.argument('address_prefixes', nargs='+') + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('network vnet delete') as c: c.argument('virtual_network_name', local_context_attribute=None) diff --git a/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_create.py b/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_create.py index 1024e39f31b..7ddce4a2160 100644 --- a/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_create.py +++ b/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_create.py @@ -61,6 +61,20 @@ def _build_arguments_schema(cls, *args, **kwargs): help="The virtual network (VNet) name.", required=True, ) + _args_schema.what_if = AAZBoolArg( + options=["--what-if"], + help="Preview the changes that will be made without actually executing the command. " + "This will call the what-if service to compare the current state with the expected state after execution.", + default=False, + is_preview=True, + ) + _args_schema.export_bicep = AAZBoolArg( + options=["--export-bicep"], + help="Export the Bicep template corresponding to the what-if analysis. " + "This parameter must be used together with --what-if.", + default=False, + is_preview=True, + ) _args_schema.extended_location = AAZObjectArg( options=["--extended-location"], help="The extended location of the virtual network.", diff --git a/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_update.py b/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_update.py index 600935a2e66..4b8f5caf5ac 100644 --- a/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_update.py +++ b/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/vnet/_update.py @@ -112,6 +112,20 @@ def _build_arguments_schema(cls, *args, **kwargs): nullable=True, enum={"Basic": "Basic", "Disabled": "Disabled"}, ) + _args_schema.what_if = AAZBoolArg( + options=["--what-if"], + help="Preview the changes that will be made without actually executing the command. 
" + "This will call the what-if service to compare the current state with the expected state after execution.", + default=False, + is_preview=True, + ) + _args_schema.export_bicep = AAZBoolArg( + options=["--export-bicep"], + help="Export the Bicep template corresponding to the what-if analysis. " + "This parameter must be used together with --what-if.", + default=False, + is_preview=True, + ) address_prefixes = cls._args_schema.address_prefixes address_prefixes.Element = AAZStrArg( diff --git a/src/azure-cli/azure/cli/command_modules/resource/_formatters.py b/src/azure-cli/azure/cli/command_modules/resource/_formatters.py index 36b2bd8c954..d6246d396d7 100644 --- a/src/azure-cli/azure/cli/command_modules/resource/_formatters.py +++ b/src/azure-cli/azure/cli/command_modules/resource/_formatters.py @@ -101,7 +101,7 @@ def format_what_if_operation_result(what_if_operation_result, enable_color=True) def _format_noise_notice(builder): builder.append_line( """Note: The result may contain false positive predictions (noise). -You can help us improve the accuracy of the result by opening an issue here: https://aka.ms/WhatIfIssues""" +You can help us improve the accuracy of the result by opening an issue here: https://github.com/Azure/azure-cli/issues/new?template=what_if.yml""" ) builder.append_line() diff --git a/src/azure-cli/azure/cli/command_modules/storage/_params.py b/src/azure-cli/azure/cli/command_modules/storage/_params.py index 9185b7d35ac..ec01f018cd1 100644 --- a/src/azure-cli/azure/cli/command_modules/storage/_params.py +++ b/src/azure-cli/azure/cli/command_modules/storage/_params.py @@ -6,7 +6,8 @@ from azure.cli.core.profiles import ResourceType from azure.cli.core.commands.validators import get_default_location_from_resource_group from azure.cli.core.commands.parameters import (tags_type, file_type, get_location_type, - get_enum_type, get_three_state_flag, edge_zone_type) + get_enum_type, get_three_state_flag, edge_zone_type, + get_what_if_type, get_export_bicep_type) from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL from ._validators import (get_datetime_type, validate_metadata, get_permission_validator, get_permission_help_string, @@ -338,6 +339,8 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem c.argument('kind', help='Indicate the type of storage account.', arg_type=get_enum_type(t_kind), default='StorageV2' if self.cli_ctx.cloud.profile == 'latest' else 'Storage') + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) c.argument('https_only', arg_type=get_three_state_flag(), help='Allow https traffic only to storage service if set to true. 
The default value is true.') c.argument('tags', tags_type) @@ -668,6 +671,8 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem c.argument('action', action_type) c.argument('resource_id', help='The resource id to add in network rule.', arg_group='Resource Access Rule') c.argument('tenant_id', help='The tenant id to add in network rule.', arg_group='Resource Access Rule') + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('storage account blob-service-properties', resource_type=ResourceType.MGMT_STORAGE) as c: @@ -1544,6 +1549,8 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'], arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True, help='Block override of encryption scope from the container default.') + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('storage container delete') as c: c.argument('fail_not_exist', help='Throw an exception if the container does not exist.') @@ -1835,6 +1842,8 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem arg_type=get_three_state_flag(), help='Specifies whether the snapshot virtual directory should be accessible at the root of the ' 'share mount point when NFS is enabled. If not specified, it will be accessible.') + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('storage share url') as c: c.extra('unc', action='store_true', help='Output UNC network path.') diff --git a/src/azure-cli/azure/cli/command_modules/storage/operations/account.py b/src/azure-cli/azure/cli/command_modules/storage/operations/account.py index 50a76358e0f..49b17b2eb23 100644 --- a/src/azure-cli/azure/cli/command_modules/storage/operations/account.py +++ b/src/azure-cli/azure/cli/command_modules/storage/operations/account.py @@ -79,7 +79,7 @@ def create_storage_account(cmd, resource_group_name, account_name, sku=None, loc immutability_period_since_creation_in_days=None, immutability_policy_state=None, allow_protected_append_writes=None, public_network_access=None, dns_endpoint_type=None, enable_smb_oauth=None, zones=None, zone_placement_policy=None, - enable_blob_geo_priority_replication=None): + enable_blob_geo_priority_replication=None, what_if=None, export_bicep=None): StorageAccountCreateParameters, Kind, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \ cmd.get_models('StorageAccountCreateParameters', 'Kind', 'Sku', 'CustomDomain', 'AccessTier', 'Identity', 'Encryption', 'NetworkRuleSet') @@ -746,7 +746,8 @@ def list_network_rules(client, resource_group_name, account_name): def add_network_rule(cmd, client, resource_group_name, account_name, action='Allow', subnet=None, - vnet_name=None, ip_address=None, tenant_id=None, resource_id=None): # pylint: disable=unused-argument + vnet_name=None, ip_address=None, tenant_id=None, resource_id=None, + what_if=False, export_bicep=False): # pylint: disable=unused-argument sa = client.get_properties(resource_group_name, account_name) rules = sa.network_rule_set if not subnet and not ip_address: diff --git a/src/azure-cli/azure/cli/command_modules/storage/operations/blob.py 
b/src/azure-cli/azure/cli/command_modules/storage/operations/blob.py index 6b09039eb19..e232ce46f14 100644 --- a/src/azure-cli/azure/cli/command_modules/storage/operations/blob.py +++ b/src/azure-cli/azure/cli/command_modules/storage/operations/blob.py @@ -120,7 +120,8 @@ def container_rm_exists(client, resource_group_name, account_name, container_nam # pylint: disable=unused-argument def create_container(client, container_name, resource_group_name=None, metadata=None, public_access=None, fail_on_exist=False, timeout=None, - default_encryption_scope=None, prevent_encryption_scope_override=None): + default_encryption_scope=None, prevent_encryption_scope_override=None, + what_if=None, export_bicep=None): encryption_scope = None if default_encryption_scope is not None or prevent_encryption_scope_override is not None: encryption_scope = { diff --git a/src/azure-cli/azure/cli/command_modules/storage/operations/fileshare.py b/src/azure-cli/azure/cli/command_modules/storage/operations/fileshare.py index 3b4aa5d2655..17f25d183d8 100644 --- a/src/azure-cli/azure/cli/command_modules/storage/operations/fileshare.py +++ b/src/azure-cli/azure/cli/command_modules/storage/operations/fileshare.py @@ -27,7 +27,8 @@ def list_shares(client, prefix=None, marker=None, num_results=None, return result -def create_share(cmd, client, metadata=None, quota=None, fail_on_exist=False, timeout=None, **kwargs): +def create_share(cmd, client, metadata=None, quota=None, fail_on_exist=False, timeout=None, + what_if=False, export_bicep=False, **kwargs): from azure.core.exceptions import HttpResponseError try: client.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) diff --git a/src/azure-cli/azure/cli/command_modules/vm/_params.py b/src/azure-cli/azure/cli/command_modules/vm/_params.py index 3fb5969f98d..d7aa5ee3de4 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/_params.py +++ b/src/azure-cli/azure/cli/command_modules/vm/_params.py @@ -13,7 +13,7 @@ from azure.cli.core.commands.validators import ( get_default_location_from_resource_group, validate_file_or_dict) from azure.cli.core.commands.parameters import ( - get_location_type, get_resource_name_completion_list, tags_type, get_three_state_flag, + get_location_type, get_what_if_type, get_export_bicep_type, get_resource_name_completion_list, tags_type, get_three_state_flag, file_type, get_enum_type, zone_type, zones_type) from azure.cli.command_modules.vm._actions import _resource_not_exists from azure.cli.command_modules.vm._completers import ( @@ -415,6 +415,8 @@ def load_arguments(self, _): c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm update') as c: + c.argument('what_if', get_what_if_type()) + c.argument('export_bicep', get_export_bicep_type()) c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help="enable/disable disk write accelerator. 
Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2") @@ -464,6 +466,8 @@ def load_arguments(self, _): c.argument('zone_placement_policy', arg_type=get_enum_type(self.get_models('ZonePlacementPolicyType')), min_api='2024-11-01', help="Specify the policy for virtual machine's placement in availability zone") c.argument('include_zones', nargs='+', min_api='2024-11-01', help='If "--zone-placement-policy" is set to "Any", availability zone selected by the system must be present in the list of availability zones passed with "--include-zones". If "--include-zones" is not provided, all availability zones in region will be considered for selection.') c.argument('exclude_zones', nargs='+', min_api='2024-11-01', help='If "--zone-placement-policy" is set to "Any", availability zone selected by the system must not be present in the list of availability zones passed with "excludeZones". If "--exclude-zones" is not provided, all availability zones in region will be considered for selection.') + c.argument('what_if', get_what_if_type()) + c.argument('export_bicep', get_export_bicep_type()) for scope in ['vm create', 'vm update']: with self.argument_context(scope) as c: @@ -552,11 +556,15 @@ def load_arguments(self, _): c.argument('source_disk_restore_point', options_list=['--source-disk-restore-point', '--source-disk-rp'], nargs='+', min_api='2024-11-01', help='create a data disk from a disk restore point. Can use the ID of a disk restore point.') c.argument('new_names_of_source_snapshots_or_disks', options_list=['--new-names-of-source-snapshots-or-disks', '--new-names-of-sr'], nargs='+', min_api='2024-11-01', help='The name of create new data disk from a snapshot or another disk.') c.argument('new_names_of_source_disk_restore_point', options_list=['--new-names-of-source-disk-restore-point', '--new-names-of-rp'], nargs='+', min_api='2024-11-01', help='The name of create new data disk from a disk restore point.') + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') c.argument('force_detach', action='store_true', min_api='2020-12-01', help='Force detach managed data disks from a VM.') c.argument('disk_ids', nargs='+', min_api='2024-03-01', help='The disk IDs of the managed disk (space-delimited).') + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') @@ -626,6 +634,10 @@ def load_arguments(self, _): with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) + + with self.argument_context('vm nic remove') as c: + c.argument('what_if', arg_type=get_what_if_type()) + c.argument('export_bicep', arg_type=get_export_bicep_type()) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') diff --git a/src/azure-cli/azure/cli/command_modules/vm/custom.py b/src/azure-cli/azure/cli/command_modules/vm/custom.py index 1f27fbb83f4..14cfcfa89fb 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/custom.py +++ b/src/azure-cli/azure/cli/command_modules/vm/custom.py @@ -874,7 +874,7 @@ def create_vm(cmd, vm_name, resource_group_name, image=None, size='Standard_DS1_ enable_user_redeploy_scheduled_events=None, zone_placement_policy=None, include_zones=None, exclude_zones=None, align_regional_disks_to_vm_zone=None, wire_server_mode=None, imds_mode=None, wire_server_access_control_profile_reference_id=None, imds_access_control_profile_reference_id=None, - key_incarnation_id=None, add_proxy_agent_extension=None): + key_incarnation_id=None, add_proxy_agent_extension=None, what_if=False, export_bicep=False): from azure.cli.core.commands.client_factory import get_subscription_id from azure.cli.core.util import random_string, hash_string @@ -1652,7 +1652,7 @@ def update_vm(cmd, resource_group_name, vm_name, os_disk=None, disk_caching=None align_regional_disks_to_vm_zone=None, wire_server_mode=None, imds_mode=None, add_proxy_agent_extension=None, wire_server_access_control_profile_reference_id=None, imds_access_control_profile_reference_id=None, - key_incarnation_id=None, **kwargs): + key_incarnation_id=None, what_if=False, export_bicep=False, **kwargs): from azure.mgmt.core.tools import parse_resource_id, resource_id, is_valid_resource_id from ._vm_utils import update_write_accelerator_settings, update_disk_caching SecurityProfile, UefiSettings = cmd.get_models('SecurityProfile', 'UefiSettings') @@ -2148,7 +2148,8 @@ def show_default_diagnostics_configuration(is_windows_os=False): def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk=None, ids=None, disks=None, new=False, sku=None, size_gb=None, lun=None, caching=None, enable_write_accelerator=False, disk_ids=None, source_snapshots_or_disks=None, source_disk_restore_point=None, - new_names_of_source_snapshots_or_disks=None, new_names_of_source_disk_restore_point=None): + new_names_of_source_snapshots_or_disks=None, new_names_of_source_disk_restore_point=None, + what_if=False, export_bicep=False): # attach multiple managed disks using disk attach API vm = get_vm_to_update(cmd, resource_group_name, vm_name) if not new and not sku and not size_gb and disk_ids is not None: @@ -2273,7 +2274,8 @@ def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name): # endregion -def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None): +def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None, + what_if=False, export_bicep=False): if disk_ids is not None: data_disks = [] for disk_item in disk_ids: @@ -2716,7 +2718,8 @@ def add_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): return _update_vm_nics(cmd, vm, existing_nics + new_nics, primary_nic) -def remove_vm_nic(cmd, resource_group_name, vm_name, 
nics, primary_nic=None): +def remove_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None, + what_if=False, export_bicep=False): def to_delete(nic_id): return [n for n in nics_to_delete if n.id.lower() == nic_id.lower()]
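
Reviewer note (not part of the diff): a minimal sketch of driving the new core helper directly, assuming this change is applied and an `az login` session exists. The resource names, the `get_default_cli()` bootstrap, and the printed fields are illustrative assumptions, not something this PR defines.

# Minimal sketch: exercises azure.cli.core.what_if.show_what_if the same way
# AzCliCommandInvoker._what_if does. Assumes a logged-in profile; names below
# (my-rg, my-vm) are placeholders.
from azure.cli.core import get_default_cli
from azure.cli.core.what_if import show_what_if

cli_ctx = get_default_cli()

# The helper takes the whole az invocation as one script string; `vm create`
# is one of the commands in the what-if whitelist added above.
script = "az vm create -g my-rg -n my-vm --image Ubuntu2204"

# no_pretty_print=True returns the raw result dict instead of printing the
# colorized summary; export_bicep=True also requests the generated Bicep
# template, which the invoker path would save under ~/.azure/whatif/az_vm/.
result = show_what_if(cli_ctx, script, no_pretty_print=True, export_bicep=True)

# The service response is expected to carry a 'changes' list with
# changeType/resourceId entries, as read by convert_json_to_what_if_result.
for change in result.get("changes", []):
    print(change.get("changeType"), change.get("resourceId"))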