diff --git a/.bumpversion.cfg b/.bumpversion.cfg index d1639d8..159a8a9 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -14,6 +14,6 @@ replace = __version__ = {new_version} [semver] main_branches = development -major_branches = +major_branches = release, major minor_branches = feature patch_branches = hotfix, bugfix diff --git a/README.md b/README.md index 30bf002..e512f13 100644 --- a/README.md +++ b/README.md @@ -13,20 +13,25 @@ pip install path/to/deployer-.tar.gz Deployer is free for use by RightBrain Networks Clients however comes as is with out any guarantees. ##### Flags -* -c --config (REQUIRED) : Yaml configuration file to run against. +* -c --config : Yaml configuration file to run against. * -s --stack (REQUIRED) : Stack Name corresponding to a block in the config file. * -x --execute (REQUIRED) : create|update|delete|sync|change Action you wish to take on the stack. * -p --profile : AWS CLI Profile to use for AWS commands [CLI Getting Started](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). * -P --param , --param PARAM An override for a parameter +* -J --json-param : A JSON string for overriding a collection of parameters * -y --copy : Copy directory structures specified in the configuration file under the sync_dirs configuration. * -A --all : Create or Update all stacks in the configuration file, Note you do not have to specify a stack when using this option. -* -r --disable-roleback : Disable rollback on failure, useful when trying to debug a failing stack. +* -r --disable-rollback : Disable rollback on failure, useful when trying to debug a failing stack. * -t --timeout : Sets Stack create timeout * -e --events : Display events of the CloudFormation stack at regular intervals. -* -z --zip : Pip install requirements, and zip up lambda's for builds. +* -z --zip-lambdas : Pip install requirements, and zip up lambda's for builds. 
* -t --change-set-name (REQUIRED Change Sets Only) : Used when creating a change set to name the change set. * -d --change-set-description (REQUIRED Change Sets Only) : Used when creating a change set to describe the change set. -* -j, --assume-valid Assumes templates are valid and does not do upstream validation (good for preventing rate limiting) +* -j --assume-valid : Assumes templates are valid and does not do upstream validation (good for preventing rate limiting) +* -O --export-yaml : Export stack config to specified YAML file. +* -o --export-json : Export stack config to specified JSON file. +* -i --config-version : Execute ( list | get | set ) of stack config. +* -n --config-version-number : Specified config version, used with --config-version option. * -D, --debug Sets logging level to DEBUG & enables traceback * -v, --version Print version number * --init [INIT] Initialize a skeleton directory @@ -58,6 +63,7 @@ Zip up lambdas, copy to s3, and update. *Note* See [example_configs/dev-us-east-1.yml](./example_configs/dev-us-east-1.yml) for an example configuration file. The config is a large dictionary. First keys within the dictionary are Stack Names. The global Environment Parameters is a common ground to deduplicate parameter entries that are used in each Stack. Stack parameters overwrite global parameters. +When deployer is run, it creates a DynamoDB Table called CloudFormation-Deployer if it does not already exist. The stack configuration in the config file is saved into DynamoDB, and any future changes result in a new entry with an updated timestamp. ## Required The following are required for each stack, they can be specified specifically to the stack or in the global config. @@ -122,6 +128,32 @@ These parameters provide identity to the Services like what AMI to use or what b UploadInstanceType: t2.medium ``` +Parameters can be overridden from the command line in several different ways. + +The first (which takes precedence) is the -P option. 
Parameters can be specified in the following form: +``` +deployer -P 'Param1=Value1' +``` +deployer will set the value of parameter 'Param1' to 'Value1', even if it is also specified in the config file. -P can be specified multiple times for multiple parameters + +The second is the -J option. Parameters can be specified in the following form: +``` +deployer -J '{"Param1":"Value1"}' +``` +This option allows the user to specify multiple parameter values as a single JSON object. This will override parameters of the same name as those specified in the config file as well. + +It is important to note that since a stack's configuration is saved in the DynamoDB state table, specifying these overrides without sending a config file will use the existing configuration for the stack retrieved from the table, but with the overridden parameter values swapped in. +If it is desirable to send a config file to update some of the parameter values but keep some of the existing values from the previous configuration, it can be done like this: +``` + parameters: + Monitoring: 'True' + NginxAMI: + UsePreviousValue: True + NginxInstanceType: t2.medium +``` +Notice that for the NginxAMI parameter, the value is now a dictionary instead of a string, and the UsePreviousValue key is set to True. This indicates to deployer to use the existing value in the configuration for the NginxAMI parameter. + + ## Lookup Parameters These are parameters that can be pulled from another stack's output. `deployer` tolerates but logs parameters that exist within the configuration but do not exist within the template. @@ -172,6 +204,30 @@ Denote that at tranform is used in a stack and deployer will automatically creat transforms: true ``` +## Versions +There are several command line options that allow the user to view and set the configuration based on version number. + +When a new configuration is saved automatically to the DynamoDB table, a version number is generated and assigned to it. 
These versions can be viewed like this: +``` +./deployer -s --config-version list +``` +This will output a list of config version numbers and creation timestamps. Viewing a specific configuration based on the number can be done like this: +``` +./deployer -s MyStack --config-version get --config-version-number 1 +``` +In the above example, the output will return the configuration for stack MyStack with the version number 1, the original configuration for the stack. We can then effectively roll back to that configuration with this command: +``` +./deployer -s MyStack --config-version set --config-version-number 1 +``` +This will set the configuration for MyStack back to version 1, reverting the values for parameters, tags, etc. + +## Exports +The configuration for a stack can be exported to a file as well. Two formats are supported, JSON and YAML. An example for each is shown here: +``` +./deployer -s MyStack --export-yaml ../mystack-config.yaml +./deployer -s MyStack --export-json configs/mystack-config.json +``` + ## Updates When running updates to a stack you'll be running updates to the CloudFormation Stack specified by Stack. @@ -225,7 +281,7 @@ Currenly there is only the Stack class, Network and Environment classes are now This is the class that builds zip archives for lambdas and copies directories to s3 given the configuration in the config file. **Note** -Network Class has been removed, it's irrelivant now. It was in place because of a work around in cloudformation limitations. The abstract class may not be relivant, all of the methods are simmular enough but starting this way provides flexablility if the need arise to model the class in a different way. +Network Class has been removed, it's irrelevant now. It was in place because of a workaround in cloudformation limitations. The abstract class may not be relevant, all of the methods are similar enough but starting this way provides flexibility if the need arises to model the class in a different way. 
# Config Updater @@ -353,3 +409,29 @@ Our top template contains numerous references to child templates. Using a combin ``` You can add your own templates under the `cloudformation` directory to deploy your own stacks. Each stack will also need an entry in your deployer config file to specify which directories should be uploaded, the name of the stack, and any required parameters. + +# Upgrade path to 1.0.0 + +A breaking change is made in the 1.0.0 release. The stack_name attribute in the stack configuration is now deprecated. The resulting CloudFormation stack that is created is now the name of the stack definition. For example, consider the following stack definition: + +``` +deployer: + stack_name: shared-deployer + template: cloudformation/deployer/top.yaml + parameters: + Environment: Something +``` + +In previous versions, the CloudFormation stack that gets deployed from this is called `shared-deployer`. In 1.0.0+, the CloudFormation stack that gets deployed is called `deployer`. + +This means that for existing configurations, the top level stack definition name must be changed to match the stack_name attribute, like this: + +``` +shared-deployer: + stack_name: shared-deployer + template: cloudformation/deployer/top.yaml + parameters: + Environment: Something +``` + +This will ensure that deployer recognizes the existing CloudFormation stack, rather than forcing you to create a new one. 
diff --git a/deployer/__init__.py b/deployer/__init__.py index e22154c..4700b1c 100755 --- a/deployer/__init__.py +++ b/deployer/__init__.py @@ -1,6 +1,7 @@ #!/usr/bin/env python import argparse import json +import yaml import os from botocore.exceptions import ClientError from deployer.stack import Stack @@ -24,10 +25,11 @@ def main(): # Build arguement parser parser = argparse.ArgumentParser(description='Deploy CloudFormation Templates') - parser.add_argument("-c", "--config", help="Path to config file.") + parser.add_argument("-c", "--config", help="Path to config file.",default=None) parser.add_argument("-s", "--stack", help="Stack Name.") parser.add_argument("-x", "--execute", help="Execute ( create | update | delete | upsert | sync | change ) of stack.") parser.add_argument("-P", "--param", action='append', help='An override for a parameter') + parser.add_argument("-J", "--json-param", help='A JSON string for overriding a collection of parameters') parser.add_argument("-p", "--profile", help="Profile.",default=None) parser.add_argument("-t", "--change-set-name", help="Change Set Name.") parser.add_argument("-d", "--change-set-description", help="Change Set Description.") @@ -40,6 +42,10 @@ def main(): parser.add_argument("-D", "--debug", help="Sets logging level to DEBUG & enables traceback", action="store_true", dest="debug", default=False) parser.add_argument("-v", "--version", help='Print version number', action='store_true', dest='version') parser.add_argument("-T", "--timeout", type=int, help='Stack create timeout') + parser.add_argument("-O", "--export-yaml", help="Export stack config to specified YAML file.",default=None) + parser.add_argument("-o", "--export-json", help="Export stack config to specified JSON file.",default=None) + parser.add_argument("-i", "--config-version", help="Execute ( list | get | set ) of stack config.") + parser.add_argument("-n", "--config-version-number", help="Specified config version, used with --config-version option.") 
parser.add_argument('--init', default=None, const='.', nargs='?', help='Initialize a skeleton directory') parser.add_argument("--disable-color", help='Disables color output', action='store_true', dest='no_color') @@ -71,10 +77,19 @@ def main(): # Validate arguements and parameters options_broken = False params = {} - if not args.config: - args.config = 'config.yml' + if args.all: + if not args.config: + print(colors['warning'] + "Must Specify config flag!" + colors['reset']) + options_broken = True if not args.all: - if not args.execute: + if args.config_version: + if args.config_version != "list" and args.config_version != "set" and args.config_version != "get": + print(colors['warning'] + "config-version command '" + args.config_version + "' not recognized. Must be one of: list, set, get "+ colors['reset']) + options_broken = True + if (args.config_version == 'set' or args.config_version == 'get') and not args.config_version_number: + print(colors['warning'] + "config-version " + args.config_version + " requires config-version-number flag!" + colors['reset']) + options_broken = True + elif not args.execute: print(colors['warning'] + "Must Specify execute flag!" + colors['reset']) options_broken = True if not args.stack: @@ -89,6 +104,20 @@ def main(): print(colors['warning'] + "Invalid format for parameter '{}'".format(param) + colors['reset']) options_broken = True + try: + json_param_dict = {} + if args.json_param: + json_param_dict = json.loads(args.json_param) + if args.param: + #Merge the dicts + merged_params = {**json_param_dict, **params} + params = merged_params + else: + params = json_param_dict + except: + print(colors['warning'] + "Invalid format for json-param, must be valid json." 
+ colors['reset']) + options_broken = True + # Print help output if options_broken: parser.print_help() @@ -100,33 +129,90 @@ def main(): console_logger.setLevel(logging.ERROR) try: - # Read Environment Config - with open(args.config) as f: - config = ruamel.yaml.safe_load(f) # Load stacks into queue stackQueue = [] if not args.all: stackQueue = [args.stack] else: - for stack in config.items(): + #Load config, get stacks + try: + with open(args.config) as f: + file_data = ruamel.yaml.safe_load(f) + except Exception as e: + msg = str(e) + logger.error("Failed to retrieve data from config file {}: {}".format(file_name,msg)) + exit(3) + + for stack in file_data.keys(): if stack[0] != "global": - stackQueue = find_deploy_path(config, stack[0], stackQueue) + stackQueue = find_deploy_path(config_object.get_config(), stack[0], stackQueue) # Create or update all Environments for stack in stackQueue: if stack != 'global' and (args.all or stack == args.stack): logger.info("Running " + colors['underline'] + str(args.execute) + colors['reset'] + " on stack: " + colors['stack'] + stack + colors['reset']) - + + # Create deployer config object + cargs = { + 'profile': args.profile, + 'stack_name': stack + } + if args.config: + cargs['file_name'] = args.config + + if args.param or args.json_param: + cargs['override_params'] = params + + config_object = Config(**cargs) + + #Config Version Handling + if args.config_version: + if args.config_version == "list": + versions = config_object.list_versions() + for version in versions: + if 'version' in version: + print("Timestamp: {} Version: {}".format(version['timestamp'], version['version'])) + elif args.config_version == "get": + retrieved_config = config_object.get_version(args.config_version_number) + print(yaml.dump(retrieved_config,default_flow_style=False, allow_unicode=True)) + elif args.config_version == "set": + config_object.set_version(args.config_version_number) + + continue + + #Export if specified + if args.export_json: + 
config_dict = config_object.get_config() + + try: + with open(args.export_json, 'w') as f: + j = json.dumps(config_dict, indent=4) + f.write(j) + except Exception as e: + msg = str(e) + logger.error("Failed to export data to JSON file {}: {}".format(args.export_json,msg)) + exit(3) + + if args.export_yaml: + config_dict = config_object.get_config() + + try: + with open(args.export_yaml, 'w') as f: + yaml.dump(config_dict, f, default_flow_style=False, allow_unicode=True) + except Exception as e: + msg = str(e) + logger.error("Failed to export data to YAML file {}: {}".format(args.export_yaml,msg)) + exit(3) + # Build lambdas on `-z` if args.zip_lambdas: logger.info("Building lambdas for stack: " + stack) - LambdaPrep(args.config, args.stack).zip_lambdas() - - # Create deployer config object - config_object = Config(args.config, stack) - + lambda_dirs = config_object.get_config_att('lambda_dirs', []) + sync_base = config_object.get_config_att('sync_base', '.') + LambdaPrep(sync_base, lambda_dirs).zip_lambdas() + # AWS Session object session = Session(profile_name=args.profile, region_name=config_object.get_config_att('region')) @@ -141,7 +227,7 @@ def main(): # S3 bucket to sync to bucket = CloudtoolsBucket(session, config_object.get_config_att('sync_dest_bucket', None)) - + # Check whether stack is a stack set or not and assign corresponding object if(len(config_object.get_config_att('regions', [])) > 0 or len(config_object.get_config_att('accounts', [])) > 0): env_stack = StackSet(session, stack, config_object, bucket, arguements) @@ -149,7 +235,6 @@ def main(): if args.timeout and args.execute not in ['create', 'upsert']: logger.warning("Timeout specified but action is not 'create'. 
Timeout will be ignored.") env_stack = Stack(session, stack, config_object, bucket, arguements) - try: # Sync files to S3 @@ -185,6 +270,7 @@ def main(): if args.debug: tb = sys.exc_info()[2] traceback.print_tb(tb) + exit(1) def find_deploy_path(stackConfig, checkStack, resolved = []): #Generate depedency graph diff --git a/deployer/cloudformation.py b/deployer/cloudformation.py index 7f15735..c0a925a 100644 --- a/deployer/cloudformation.py +++ b/deployer/cloudformation.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -import git from abc import ABCMeta, abstractmethod from deployer.logger import logger @@ -52,20 +51,6 @@ def reload_stack_status(self): def status(self): pass - def get_repository(self, base): - try: - return git.Repo(base, search_parent_directories=True) - except git.exc.InvalidGitRepositoryError: - return None - - def get_repository_origin(self, repository): - try: - origin = repository.remotes.origin.url - return origin.split('@', 1)[-1] if origin else None - except (StopIteration, ValueError): - return None - return None - def get_template_body(self, bucket, template): if not bucket: try: diff --git a/deployer/configuration.py b/deployer/configuration.py index 73c633a..e386acb 100644 --- a/deployer/configuration.py +++ b/deployer/configuration.py @@ -2,13 +2,405 @@ from deployer.logger import logger from deployer.stack import Stack import ruamel.yaml, json, re +from collections import MutableMapping +from time import sleep +from boto3.session import Session +from copy import deepcopy +from datetime import datetime +import git class Config(object): - def __init__(self, file_name, master_stack): + def __init__(self, profile, stack_name, file_name=None, override_params=None): + self.table_name = "CloudFormation-Deployer" + self.index_name = "VersionIndex" + self.profile = profile + self.stack = stack_name self.file_name = file_name - self.config = self.get_config() - self.stack = master_stack + + self.file_data = self._get_file_data(file_name) + + #Create 
boto3 session and dynamo client + self.session = Session(profile_name=self.profile) + self.dynamo = self.session.client('dynamodb') + + #Create state table if necessary + if not self._table_exists(): + + #We must have a config file to populate the table + if not self.file_name: + logger.error("When creating a new state table, --config option is required") + exit(3) + + #Since it doesn't exist, create it + self._create_state_table() + + self.config = {} + self.version = 0 + self._get_stack_config(override_params) + + def _get_file_data(self, file_name=None): + file_data = None + + if file_name: + try: + with open(file_name) as f: + file_data = ruamel.yaml.safe_load(f) + except Exception as e: + msg = str(e) + logger.error("Failed to retrieve data from config file {}: {}".format(file_name,msg)) + exit(3) + + return file_data + + def _get_stack_config(self, params=None): + + #Get the most recent stack config from Dynamo + try: + dynamo_args = { + 'TableName': self.table_name, + 'KeyConditionExpression': "#sn = :sn", + 'ExpressionAttributeNames': { + '#sn': 'stackname' + }, + 'ExpressionAttributeValues': { + ':sn': { + 'S': self.stack + } + }, + 'ScanIndexForward': False, + 'Limit': 1 + } + + query_resp = self.dynamo.query(**dynamo_args) + + except Exception as e: + msg = str(e) + logger.error("Failed to retrieve data from dynamo state table {} for stack {}: {}".format(self.table_name, stack_context, msg)) + exit(3) + + data = {} + if query_resp['Count'] > 0: + #Format the stack config data + item = query_resp['Items'][0] + data = self._recursive_dynamo_to_data(item) + if 'version' in data and data['version'].isdigit(): + self.version = int(data['version']) + data = data['stackconfig'] + + if self.file_data: + if self.stack in self.file_data: + config_copy = self._handle_use_previous_value(data, self.file_data[self.stack]) + + #Merge the file data for the stack if applicable, global first + if 'global' in self.file_data: + global_copy = 
self._handle_use_previous_value(data, self.file_data['global']) + merged_global = self._dict_merge(global_copy, config_copy) + config_copy = merged_global + + data = config_copy + + if params: + #Merge the override params for the stack if applicable + param_data = { + "parameters": params + } + merged_params = self._dict_merge(data, param_data) + data = merged_params + + sts = self.session.client('sts') + self.identity_arn = sts.get_caller_identity().get('Arn', '') + + # Load values from methods for config lookup + self.base = data.get('sync_base', '.') + self.repository = self.get_repository(self.base) + self.commit = self.repository.head.object.hexsha if self.repository else 'null' + self.origin = self.get_repository_origin(self.repository) if self.repository else 'null' + + if not data.get('release', False): + data['release'] = self.commit + + if params or self.file_data: + self._update_state_table(self.stack, data) + + self.config[self.stack] = data + + return data + + def _handle_use_previous_value(self, olddata, paramdict): + dict_copy = deepcopy(paramdict) + # First look for indicators to use previous value, remove it from the dict if it is true + if 'parameters' in dict_copy: + for paramkey in dict_copy['parameters'].keys(): + if isinstance(dict_copy['parameters'][paramkey],dict): + if "UsePreviousValue" in dict_copy['parameters'][paramkey]: + if dict_copy['parameters'][paramkey]["UsePreviousValue"]: + if 'parameters' in olddata and paramkey in olddata['parameters']: + dict_copy['parameters'][paramkey] = olddata['parameters'][paramkey] + else: + dict_copy['parameters'].pop(paramkey) + return dict_copy + + def _table_exists(self): + resp_tables = self.dynamo.list_tables() + if self.table_name in resp_tables['TableNames']: + resp_table = self.dynamo.describe_table(TableName=self.table_name) + if resp_table['Table']['TableStatus'] == 'ACTIVE': + return True + return False + def _create_state_table(self): + + #Set up the arguments + kwargs = { + 
'AttributeDefinitions':[ + { + 'AttributeName': 'stackname', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'timestamp', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'version', + 'AttributeType': 'S' + } + ], + 'TableName': self.table_name, + 'KeySchema':[ + { + 'AttributeName': 'stackname', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'timestamp', + 'KeyType': 'RANGE' + } + ], + 'LocalSecondaryIndexes':[ + { + 'IndexName': self.index_name, + 'KeySchema': [ + { + 'AttributeName': 'stackname', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'version', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'ALL' + } + }, + ], + 'BillingMode': 'PAY_PER_REQUEST' + } + + #Create Dynamo DB state table + try: + logger.info("Attempting to create state table") + response = self.dynamo.create_table(**kwargs) + + #Waiting for the table to exist + counter = 0 + limit = 10 + while counter < limit: + sleep(1) + if self._table_exists(): + return + counter+=1 + + raise Exception("Timeout occurred while waiting for Dynamo table creation") + + except Exception as e: + msg = str(e) + logger.error("Failed to retrieve data from dynamo state table {}: {}".format(self.table_name,msg)) + exit(3) + + return + + def _update_state_table(self, stack, data): + + #Convert to Dynamo params + stackdata = deepcopy(data) + stack_config = self._recursive_data_to_dynamo(stackdata) + timestamp = datetime.utcnow().strftime("%Y-%m-%d-%H:%M:%S.%f") + #Increment version + self.version+=1 + item = { + "stackname": { "S": stack }, + "version": { "S": str(self.version)}, + "timestamp": { "S": timestamp}, + "stackconfig": stack_config, + "caller": { "S": self.identity_arn}, + "commit": { "S": self.commit}, + "origin": { "S": self.origin}, + } + + #Set up the API arguments + kwargs = { + "TableName": self.table_name, + "Item": item + } + + try: + response = self.dynamo.put_item(**kwargs) + except Exception as e: + msg = str(e) + logger.error("Failed to update data to dynamo state 
table {}: {}".format(self.table_name,msg)) + exit(3) + + return + + def list_versions(self): + + try: + dynamo_args = { + 'TableName': self.table_name, + 'IndexName': self.index_name, + 'ConsistentRead': True, + 'KeyConditionExpression': "#sn = :sn", + 'ExpressionAttributeNames': { + '#sn': 'stackname', + "#tm": 'timestamp' + }, + 'ExpressionAttributeValues': { + ':sn': { + 'S': self.stack + } + }, + 'ProjectionExpression': "version, #tm", + 'ScanIndexForward': False + } + + query_resp = self.dynamo.query(**dynamo_args) + + except Exception as e: + msg = str(e) + logger.error("Failed to retrieve data from dynamo state table {} for stack {}: {}".format(self.table_name, self.stack, msg)) + exit(3) + + if query_resp['Count'] <= 0: + logger.error("Failed to retrieve versions from dynamo state table {} for stack {}: No versions exist".format(self.table_name, self.stack)) + exit(3) + + #Format the data + items = [] + for item in query_resp['Items']: + items.append(self._recursive_dynamo_to_data(item)) + + return items + + def get_version(self, version): + try: + dynamo_args = { + 'TableName': self.table_name, + 'IndexName': self.index_name, + 'ConsistentRead': True, + 'KeyConditionExpression': "#sn = :sn AND #vn = :vn", + 'ExpressionAttributeNames': { + '#sn': 'stackname', + '#vn': 'version', + }, + 'ExpressionAttributeValues': { + ':sn': { + 'S': self.stack + }, + ':vn': { + 'S': version + } + }, + 'ScanIndexForward': False, + } + + query_resp = self.dynamo.query(**dynamo_args) + + except Exception as e: + msg = str(e) + logger.error("Failed to retrieve data from dynamo state table {} for stack {}: {}".format(self.table_name, self.stack, msg)) + exit(3) + + if query_resp['Count'] <= 0: + logger.error("Failed to retrieve versions from dynamo state table {} for stack {}: Version '{}' does not exist".format(self.table_name, self.stack, version)) + exit(3) + + #Format the data + item = self._recursive_dynamo_to_data(query_resp['Items'][0]) + + return item + + def 
set_version(self, version): + item = self.get_version(version) + + stackconfig = item['stackconfig'] + self._update_state_table(self.stack, stackconfig) + + self.config[self.stack] = stackconfig + + return + + def _recursive_data_to_dynamo(self, param): + + if isinstance(param, dict): + paramdict = {} + for key in param.keys(): + if param[key] != '': + paramdict[key] = self._recursive_data_to_dynamo(param[key]) + return {'M': paramdict} + elif isinstance(param, list): + return {'L': [ self._recursive_data_to_dynamo(item) for item in param ] } + + #For everything else, force it to be a string type for Dynamo + + return {'S': param} + + def _recursive_dynamo_to_data(self, param): + if isinstance(param, dict): + paramdict = {} + for key in param.keys(): + if key == 'S': + return str(param[key]) + elif key == 'L': + newlist = [self._recursive_dynamo_to_data(item) for item in param[key]] + return newlist + elif key == 'M': + return self._recursive_dynamo_to_data(param[key]) + else: + paramdict[str(key)] = self._recursive_dynamo_to_data(param[key]) + return paramdict + + return param + + def _dict_merge(self, old, new): + #Recursively go through the nested dictionaries, with values in + # 'new' overwriting the values in 'old' for the same key + + for k, v in old.items(): + if k in new: + if all(isinstance(e, MutableMapping) for e in (v, new[k])): + new[k] = self._dict_merge(v, new[k]) + merged = old.copy() + merged.update(new) + return merged + + def construct_tags(self): + tags = self.get_config_att('tags') + if tags: + tags = [ { 'Key': key, 'Value': value } for key, value in tags.items() ] + if len(tags) > 47: + raise ValueError('Resources tag limit is 50, you have provided more than 47 tags. 
Please limit your tagging, save room for name and deployer tags.') + else: + tags = [] + tags.append({'Key': 'deployer:stack', 'Value': self.stack}) + tags.append({'Key': 'deployer:caller', 'Value': self.identity_arn}) + tags.append({'Key': 'deployer:git:commit', 'Value': self.commit}) + tags.append({'Key': 'deployer:git:origin', 'Value': self.origin}) + if self.file_name: + tags.append({'Key': 'deployer:config', 'Value': self.file_name.replace('\\', '/')}) + return tags + def build_params(self, session, stack_name, release, params, temp_file): # create parameters from the config.yml file self.parameter_file = "%s-params.json" % stack_name @@ -66,16 +458,29 @@ def build_params(self, session, stack_name, release, params, temp_file): return_params.remove(item) logger.info("Parameters Created") return return_params + + def get_repository(self, base): + try: + return git.Repo(base, search_parent_directories=True) + except git.exc.InvalidGitRepositoryError: + return None - def get_config(self): - with open(self.file_name) as f: - data = ruamel.yaml.safe_load(f) - return data + def get_repository_origin(self, repository): + try: + origin = repository.remotes.origin.url + return origin.split('@', 1)[-1] if origin else None + except (StopIteration, ValueError): + return None + return None def get_config_att(self, key, default=None, required=False): base = self.config.get('global', {}).get(key, None) base = self.config.get(self.stack).get(key, base) if required and base is None: - logger.error("Required attribute '{}' not found in config '{}'.".format(key, self.file_name)) + logger.error("Required attribute '{}' not found in config.".format(key)) exit(3) - return base if base is not None else default \ No newline at end of file + return base if base is not None else default + + def get_config(self): + return self.config + diff --git a/deployer/lambda_prep.py b/deployer/lambda_prep.py index c1b9028..713cd2c 100755 --- a/deployer/lambda_prep.py +++ b/deployer/lambda_prep.py 
@@ -8,12 +8,10 @@ class LambdaPrep: - def __init__(self, config_file, environment): - self.config_file = config_file - self.config = self.get_config(config_file) - self.environment = environment - self.lambda_dirs = self.get_config_att('lambda_dirs', []) - self.sync_base = self.get_config_att('sync_base', '.') + def __init__(self, sync_base, lambda_dirs): + + self.lambda_dirs = lambda_dirs + self.sync_base = sync_base if not isinstance(self.lambda_dirs, list): logger.error("Attribute 'lambda_dirs' must be a list.") @@ -21,19 +19,6 @@ def __init__(self, config_file, environment): elif not self.lambda_dirs: logger.warning("Lambda packaging requested but no directories specified with the 'lambda_dirs' attribute") - def get_config(self, config): - with open(config) as f: - data = yaml.safe_load(f) - return data - - def get_config_att(self, key, default=None, required=False): - base = self.config.get('global', {}).get(key, None) - base = self.config.get(self.environment).get(key, base) - if required and base is None: - logger.error("Required attribute '{}' not found in config '{}'.".format(key, self.config_file)) - exit(3) - return base if base is not None else default - # zip_lambdas() will traverse through our configured lambda_dirs array, # create a temp lambda directory, install necessary dependencies, # zip it, move it, and cleanup all temp artifacts diff --git a/deployer/stack.py b/deployer/stack.py index f9b9b30..47bdeee 100644 --- a/deployer/stack.py +++ b/deployer/stack.py @@ -29,26 +29,18 @@ def __init__(self, session, stack, config, bucket, args = {}): self.params = args.get('params', {}) # Load values from config - self.stack_name = self.config.get_config_att('stack_name', required=True) - self.base = self.config.get_config_att('sync_base', '.') - - # Load values from methods for config lookup - self.repository = self.get_repository(self.base) - self.commit = self.repository.head.object.hexsha if self.repository else 'null' + self.stack_name = stack # Load 
values from config - self.release = self.config.get_config_att('release', self.commit).replace('/','.') + self.release = self.config.get_config_att('release').replace('/','.') self.template = self.config.get_config_att('template', required=True) self.timeout = self.config.get_config_att('timeout') if not self.timed_out else None self.transforms = self.config.get_config_att('transforms') # Intialize objects self.client = self.session.client('cloudformation') - self.sts = self.session.client('sts') # Load values from methods - self.origin = self.get_repository_origin(self.repository) if self.repository else 'null' - self.identity_arn = self.sts.get_caller_identity().get('Arn', '') self.template_url = self.bucket.construct_template_url(self.config, self.stack, self.release, self.template) # self.construct_template_url() self.template_file = self.bucket.get_template_file(self.config, self.stack) self.template_body = self.bucket.get_template_body(self.config, self.template) @@ -71,22 +63,6 @@ def reload_change_set_status(self, change_set_name): self.change_set_status = 'False' return self.change_set_status - def construct_tags(self): - tags = self.config.get_config_att('tags') - if tags: - tags = [ { 'Key': key, 'Value': value } for key, value in tags.items() ] - if len(tags) > 47: - raise ValueError('Resources tag limit is 50, you have provided more than 47 tags. 
Please limit your tagging, save room for name and deployer tags.') - else: - tags = [] - tags.append({'Key': 'deployer:stack', 'Value': self.stack}) - tags.append({'Key': 'deployer:caller', 'Value': self.identity_arn}) - tags.append({'Key': 'deployer:git:commit', 'Value': self.commit}) - tags.append({'Key': 'deployer:git:origin', 'Value': self.origin}) - tags.append({'Key': 'deployer:config', 'Value': self.config.file_name.replace('\\', '/')}) - return tags - - def create_waiter(self, start_time): waiter = self.client.get_waiter('stack_create_complete') logger.info("Creation Started") @@ -275,7 +251,7 @@ def create_stack(self): "StackName": self.stack_name, "Parameters": self.config.build_params(self.session, self.stack, self.release, self.params, self.template_file), "DisableRollback": self.disable_rollback, - "Tags": self.construct_tags(), + "Tags": self.config.construct_tags(), "Capabilities": [ 'CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', @@ -302,7 +278,7 @@ def update_stack(self): args = { "StackName": self.stack_name, "Parameters": self.config.build_params(self.session, self.stack, self.release, self.params, self.template_file), - "Tags": self.construct_tags(), + "Tags": self.config.construct_tags(), "Capabilities": [ 'CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', @@ -367,4 +343,4 @@ def reload_stack_status(self): self.stack_status = resp['Stacks'][0]['StackStatus'] except Exception: self.stack_status = 'False' - return self.stack_status \ No newline at end of file + return self.stack_status diff --git a/deployer/stack_sets.py b/deployer/stack_sets.py index 665c348..8891984 100644 --- a/deployer/stack_sets.py +++ b/deployer/stack_sets.py @@ -26,27 +26,19 @@ def __init__(self, session, stack, config, bucket, args = {}): self.print_events = args.get('print_events', False) - # Load values from methods for config lookup - self.base = self.config.get_config_att('sync_base', '.') - self.repository = self.get_repository(self.base) - self.commit = 
self.repository.head.object.hexsha if self.repository else 'null' - # Load values from config - self.release = self.config.get_config_att('release', self.commit).replace('/','.') + self.release = self.config.get_config_att('release').replace('/','.') self.template = self.config.get_config_att('template', required=True) self.account = self.config.get_config_att('account', None) self.accounts = self.config.get_config_att('accounts', None) self.execution_role = self.config.get_config_att('execution_role', None) self.regions = self.config.get_config_att('regions', None) - self.stack_name = self.config.get_config_att('stack_name', required=True) + self.stack_name = stack # Intialize objects self.client = self.session.client('cloudformation') - self.sts = self.session.client('sts') # Load values from methods - self.origin = self.get_repository_origin(self.repository) if self.repository else 'null' - self.identity_arn = self.sts.get_caller_identity().get('Arn', '') self.template_url = self.bucket.construct_template_url(self.config, self.stack, self.release, self.template) # self.construct_template_url() self.template_file = self.bucket.get_template_file(self.config, self.stack) self.template_body = self.bucket.get_template_body(self.config, self.template) @@ -122,7 +114,7 @@ def create_stack(self): ], "Parameters": self.config.build_params(self.session, self.stack, self.release, self.params, self.template_file), 'StackSetName': self.stack_name, - "Tags": self.construct_tags() + "Tags": self.config.construct_tags() } if self.template_body: logger.info("Using local template due to null template bucket") @@ -173,7 +165,7 @@ def stack_set_waiter(self, operation_id, verb="Update"): # Print result results = self.client.list_stack_set_operation_results(**args) headers = ['Account', 'Region', 'Status', 'Reason'] - table = [[x['Account'], x['Region'], x['Status'], x.get('StatusReason', '')] for x in results['Summaries']] + table = [[x['Account'], x['Region'], x['Status'], 
x.get("AccountGateResult",{}).get('StatusReason', '')] for x in results['Summaries']] print(tabulate.tabulate(table, headers, tablefmt='simple')) @@ -188,7 +180,7 @@ def update_stack(self): ], "Parameters": self.config.build_params(self.session, self.stack, self.release, self.params, self.template_file), 'StackSetName': self.stack_name, - "Tags": self.construct_tags(), + "Tags": self.config.construct_tags(), } args.update({'AdministrationRoleARN': self.administration_role} if self.administration_role else {}) @@ -291,18 +283,3 @@ def delete_stack_instances(self, accounts, regions): logger.info("Deleting " + str(len(accounts) * len(regions)) + " stack instances...") result = self.client.delete_stack_instances(StackSetName=self.stack_name, Accounts=accounts, Regions=regions, RetainStacks=False) return result['OperationId'] - - def construct_tags(self): - tags = self.config.get_config_att('tags') - if tags: - tags = [ { 'Key': key, 'Value': value } for key, value in tags.items() ] - if len(tags) > 47: - raise ValueError('Resources tag limit is 50, you have provided more than 47 tags. 
Please limit your tagging, save room for name and deployer tags.') - else: - tags = [] - tags.append({'Key': 'deployer:stack', 'Value': self.stack}) - tags.append({'Key': 'deployer:caller', 'Value': self.identity_arn}) - tags.append({'Key': 'deployer:git:commit', 'Value': self.commit}) - tags.append({'Key': 'deployer:git:origin', 'Value': self.origin}) - tags.append({'Key': 'deployer:config', 'Value': self.config.file_name.replace('\\', '/')}) - return tags \ No newline at end of file diff --git a/deployer/tests.py b/deployer/tests.py index 1178f07..cabf839 100644 --- a/deployer/tests.py +++ b/deployer/tests.py @@ -1,9 +1,11 @@ import unittest import __init__ as deployer import boto3, json -import sys, subprocess, os, shutil, time +import sys, subprocess, os, re, shutil, time +from subprocess import Popen, PIPE from botocore.exceptions import ClientError import yaml +import pytz from datetime import tzinfo, timedelta, datetime deployerExecutor = "./__init__.py" @@ -88,6 +90,7 @@ def test_intialize(self): #Checks if a basic stack can be created def test_create(self): + testStackName="test" reset_config() #Make sure no stack exists @@ -104,14 +107,43 @@ def test_create(self): raise exit self.assertEqual(get_stack_status(testStackName), 'CREATE_COMPLETE') + + # Checks if state table CloudFormation-Deployer was created in DynamoDB + def test_state_table(self): + reset_config() + + testStackName = "test" + + #Create test stack + if(get_stack_status(testStackName) == "NULL"): + create_test_stack(testStackName) + + time.sleep(apiHitRate) + + # + try: + output = subprocess.check_output(['python', deployerExecutor, '-x', 'upsert', '-s','test','-D']) + except SystemExit as exit: + if exit.code != 0: + raise exit + + time.sleep(apiHitRate) + + #Wait for result + while("IN_PROGRESS" in get_stack_status(testStackName)): + time.sleep(apiHitRate) + + self.assertEqual(get_stack_status(testStackName), "UPDATE_COMPLETE") #Checks if a basic stack can be deleted def test_delete(self): 
reset_config() + + testStackName = "test" #Create test stack if(get_stack_status(testStackName) == "NULL"): - create_test_stack() + create_test_stack(testStackName) time.sleep(apiHitRate) @@ -146,7 +178,8 @@ def test_config_updater(self): def test_update(self): reset_config() - create_test_stack() + testStackName = "test" + create_test_stack(testStackName) while("IN_PROGRESS" in get_stack_status(testStackName)): time.sleep(apiHitRate) subprocess.check_output(['python', configUpdateExecutor, '-c', testStackConfig, '-u', json.dumps({"global":{'tags':{ 'Environment' : 'stack-updated' }}})]) @@ -183,12 +216,17 @@ def test_sync(self): if e.code != 0: raise e - s3obj = simplestorageservice.get_object(Bucket=testBucket, Key="deployer-test/tests/cloudformation.yaml") - self.assertTrue(s3obj['LastModified'] > datetime.now(UTC()) - timedelta(seconds=10)) + #If the file exists, it was a successful sync + try: + s3obj = simplestorageservice.get_object(Bucket=testBucket, Key="deployer-test/tests/cloudformation.yaml") + except Exception as e: + # Boto will raise an exception if the key does not exist, indicating that the sync failed. 
+ raise e # Checks if a basic stack can be created def test_timeout(self): reset_config() + testStackName = "timeout" # Make sure no stack exists if get_stack_status(testStackName) != "NULL": @@ -206,10 +244,10 @@ class IntegrationLambdaTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(IntegrationLambdaTestCase, self).__init__(*args, **kwargs) self.client = boto3.client('cloudformation') - self.stack_name = 'deployer-lambda-test' + self.stack_name = 'create' def stack_create(self): - result = subprocess.call(['deployer', '-x', 'create', '-c', 'tests/config/lambda.yaml', '-s' 'create', '-P', 'Cli=create', '-yzD']) + result = subprocess.call(['deployer', '-x', 'create', '-c', 'tests/config/lambda.yaml', '-s' 'create', '-yzD']) self.assertEqual(result, 0) stack = self.client.describe_stacks(StackName=self.stack_name) @@ -225,7 +263,7 @@ def stack_create(self): client = boto3.client('lambda') resp = client.invoke(FunctionName=func[0]) - self.assertNotEquals(resp.get("Payload", None), None) + self.assertNotEqual(resp.get("Payload", None), None) payload = json.loads(resp['Payload'].read()) self.assertEqual(payload.get("message", ''), "hello world") @@ -267,7 +305,7 @@ class IntegrationStackTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(IntegrationStackTestCase, self).__init__(*args, **kwargs) self.client = boto3.client('cloudformation') - self.stack_name = 'deployer-test' + self.stack_name = 'create' def stack_create(self): result = subprocess.call(['deployer', '-x', 'create', '-c', 'tests/config/test.yaml', '-s' 'create', '-P', 'Cli=create', '-D']) @@ -275,7 +313,7 @@ def stack_create(self): stack = self.client.describe_stacks(StackName=self.stack_name) self.assertIn('Stacks', stack.keys()) - self.assertEquals(len(stack['Stacks']), 1) + self.assertEqual(len(stack['Stacks']), 1) outputs = stack['Stacks'][0].get('Outputs', []) self.assertIn('create', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Cli']) @@ -294,14 
+332,14 @@ def stack_create(self): self.assertIn('deployer:stack', [x['Key'] for x in tags]) def stack_delete(self): - result = subprocess.call(['deployer', '-x', 'delete', '-c', 'tests/config/test.yaml', '-s' 'update', '-D']) + result = subprocess.call(['deployer', '-x', 'delete', '-c', 'tests/config/test.yaml', '-s' 'create', '-D']) self.assertEqual(result, 0) try: stack = self.client.describe_stacks(StackName=self.stack_name) self.assertIn('Stacks', stack.keys()) - self.assertEquals(len(stack['Stacks']), 1) - self.assertEquals(stack['Stacks'][0].get('StackStatus', ''), 'DELETE_IN_PROGRESS') + self.assertEqual(len(stack['Stacks']), 1) + self.assertEqual(stack['Stacks'][0].get('StackStatus', ''), 'DELETE_IN_PROGRESS') self.stack_wait() except ClientError as e: self.assertIn('does not exist', str(e)) @@ -316,12 +354,12 @@ def stack_reset(self): self.assertIn('does not exist', str(e)) def stack_update(self): - result = subprocess.call(['deployer', '-x', 'update', '-c', 'tests/config/test.yaml', '-s' 'update', '-P', 'Cli=update', '-D']) + result = subprocess.call(['deployer', '-x', 'update', '-c', 'tests/config/test.yaml', '-s' 'create', '-P', 'Cli=update', '-P', 'Local=update', '-P', 'Override=update', '-D']) self.assertEqual(result, 0) stack = self.client.describe_stacks(StackName=self.stack_name) self.assertIn('Stacks', stack.keys()) - self.assertEquals(len(stack['Stacks']), 1) + self.assertEqual(len(stack['Stacks']), 1) outputs = stack['Stacks'][0].get('Outputs', []) self.assertIn('update', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Cli']) @@ -331,14 +369,72 @@ def stack_update(self): self.assertIn('prod', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Release']) tags = stack['Stacks'][0].get('Tags', []) - self.assertIn('update', [x['Value'] for x in tags if x['Key'] == 'Local']) - self.assertIn('update', [x['Value'] for x in tags if x['Key'] == 'Override']) + #self.assertIn('update', [x['Value'] for x in tags if x['Key'] == 'Local']) + 
#self.assertIn('update', [x['Value'] for x in tags if x['Key'] == 'Override']) self.assertIn('deployer:caller', [x['Key'] for x in tags]) self.assertIn('deployer:config', [x['Key'] for x in tags]) self.assertIn('deployer:git:commit', [x['Key'] for x in tags]) self.assertIn('deployer:git:origin', [x['Key'] for x in tags]) self.assertIn('deployer:stack', [x['Key'] for x in tags]) + def stack_list_version(self): + try: + encoding = 'ascii' + list_version = subprocess.Popen(['deployer', '-i', 'list', '-s' 'create', '-D'], stdout=subprocess.PIPE) + regex = r"Version: (.*)" + output = list_version.communicate()[0].decode(encoding) + results = re.findall(regex, output) + results = [int(i) for i in results] + last_version = max(results) + self.rollback_version = str(last_version) + + stack = self.client.describe_stacks(StackName=self.stack_name) + self.assertIn('Stacks', stack.keys()) + self.assertEqual(len(stack['Stacks']), 1) + + except SystemExit as exit: + if exit.code != 0: + raise exit + + def stack_get_version(self): + try: + get_version = subprocess.call(['deployer', '-s' 'create', '-i', 'get', '-n', self.rollback_version]) + self.assertEqual(get_version, 0) + + stack = self.client.describe_stacks(StackName=self.stack_name) + self.assertIn('Stacks', stack.keys()) + self.assertEqual(len(stack['Stacks']), 1) + + except SystemExit as exit: + if exit.code != 0: + raise exit + + def stack_set_version(self): + try: + set_version = subprocess.call(['deployer', '-s' 'create', '-i', 'set', '-n', self.rollback_version]) + self.assertEqual(set_version, 0) + + stack = self.client.describe_stacks(StackName=self.stack_name) + self.assertIn('Stacks', stack.keys()) + self.assertEqual(len(stack['Stacks']), 1) + + except SystemExit as exit: + if exit.code != -0: + raise exit + + def stack_rollback_version(self): + try: + rollback = subprocess.call(['deployer', '-x', 'update', '-s' 'create', '-D']) + self.assertEqual(rollback, 0) + + stack = 
self.client.describe_stacks(StackName=self.stack_name) + self.assertIn('Stacks', stack.keys()) + self.assertEqual(len(stack['Stacks']), 1) + + except SystemExit as exit: + if exit.code != -0: + raise exit + def stack_wait(self): waiter = self.client.get_waiter('stack_delete_complete') waiter.wait(StackName=self.stack_name) @@ -347,6 +443,10 @@ def test_stack(self): self.stack_reset() self.stack_create() self.stack_update() + self.stack_list_version() + self.stack_get_version() + self.stack_set_version() + self.stack_rollback_version() self.stack_delete() @@ -355,7 +455,7 @@ class IntegrationStackSetTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(IntegrationStackSetTestCase, self).__init__(*args, **kwargs) self.client = boto3.client('cloudformation') - self.stackset_name = 'deployer-stackset-test' + self.stackset_name = 'create' def stackset_create(self): result = subprocess.call(['deployer', '-x', 'create', '-c', 'tests/config/stackset.yaml', '-s' 'create', '-P', 'Cli=create', '-D']) @@ -364,14 +464,14 @@ def stackset_create(self): instances = self.client.list_stack_instances(StackSetName=self.stackset_name) accounts = set([x['Account'] for x in instances.get('Summaries', [])]) regions = set([x['Region'] for x in instances.get('Summaries', [])]) - self.assertEquals(len(accounts), 1) - self.assertEquals(len(regions), 1) + self.assertEqual(len(accounts), 1) + self.assertEqual(len(regions), 2) for instance in [x for x in instances.get('Summaries', [])]: client = boto3.client('cloudformation', region_name=instance['Region']) stack = client.describe_stacks(StackName=instance['StackId']) self.assertIn('Stacks', stack.keys()) - self.assertEquals(len(stack['Stacks']), 1) + self.assertEqual(len(stack['Stacks']), 1) outputs = stack['Stacks'][0].get('Outputs', []) self.assertIn('create', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Cli']) @@ -390,7 +490,7 @@ def stackset_create(self): self.assertIn('deployer:stack', [x['Key'] for x in 
tags]) def stackset_delete(self): - result = subprocess.call(['deployer', '-x', 'delete', '-c', 'tests/config/stackset.yaml', '-s' 'update', '-D']) + result = subprocess.call(['deployer', '-x', 'delete', '-c', 'tests/config/stackset.yaml', '-s' 'create', '-D']) self.assertEqual(result, 0) self.assertRaises(ClientError, self.client.describe_stack_set, StackSetName=self.stackset_name) @@ -415,38 +515,38 @@ def stackset_reset(self): time.sleep(5) status = self.client.describe_stack_set_operation(StackSetName=self.stackset_name, OperationId=op) - self.assertEquals(status['StackSetOperation']['Status'], 'SUCCEEDED') + self.assertEqual(status['StackSetOperation']['Status'], 'SUCCEEDED') self.client.delete_stack_set(StackSetName=self.stackset_name) except ClientError as e: self.assertIn('StackSetNotFoundException', str(e)) def stackset_update(self): - result = subprocess.call(['deployer', '-x', 'update', '-c', 'tests/config/stackset.yaml', '-s' 'update', '-P', 'Cli=update', '-D']) + result = subprocess.call(['deployer', '-x', 'update', '-c', 'tests/config/stackset.yaml', '-s' 'create', '-P', 'Cli=update', '-D']) self.assertEqual(result, 0) instances = self.client.list_stack_instances(StackSetName=self.stackset_name) accounts = set([x['Account'] for x in instances.get('Summaries', [])]) regions = set([x['Region'] for x in instances.get('Summaries', [])]) - self.assertEquals(len(accounts), 1) - self.assertEquals(len(regions), 2) + self.assertEqual(len(accounts), 1) + self.assertEqual(len(regions), 2) for instance in [x for x in instances.get('Summaries', [])]: client = boto3.client('cloudformation', region_name=instance['Region']) stack = client.describe_stacks(StackName=instance['StackId']) self.assertIn('Stacks', stack.keys()) - self.assertEquals(len(stack['Stacks']), 1) + self.assertEqual(len(stack['Stacks']), 1) outputs = stack['Stacks'][0].get('Outputs', []) self.assertIn('update', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Cli']) self.assertIn('global', 
[x['OutputValue'] for x in outputs if x['OutputKey'] == 'Global']) - self.assertIn('update', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Local']) - self.assertIn('update', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Override']) + self.assertIn('create', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Local']) + self.assertIn('create', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Override']) self.assertIn('prod', [x['OutputValue'] for x in outputs if x['OutputKey'] == 'Release']) tags = stack['Stacks'][0].get('Tags', []) - self.assertIn('update', [x['Value'] for x in tags if x['Key'] == 'Local']) - self.assertIn('update', [x['Value'] for x in tags if x['Key'] == 'Override']) + self.assertIn('create', [x['Value'] for x in tags if x['Key'] == 'Local']) + self.assertIn('create', [x['Value'] for x in tags if x['Key'] == 'Override']) self.assertIn('deployer:caller', [x['Key'] for x in tags]) self.assertIn('deployer:config', [x['Key'] for x in tags]) self.assertIn('deployer:git:commit', [x['Key'] for x in tags]) @@ -495,7 +595,7 @@ def get_stack_tag(stack, tag): return None #Create test stack -def create_test_stack(): +def create_test_stack(testStackName): try: result = cloudformation.describe_stacks(StackName=testStackName) if 'Stacks' in result: @@ -525,11 +625,17 @@ def reset_config(): with open(testStackConfig, "w") as config: config.write(testStackConfig_data) +def cleanup(): + cloudformation.delete_stack(StackName="test") + cloudformation.delete_stack(StackName="timeout") + print("Deleted test stacks") + def main(): reset_config() - unittest.main() - cloudformation.delete_stack(StackName=testStackName) - + tests = unittest.main(exit=False) + cleanup() + if not tests.result.wasSuccessful(): + sys.exit(1) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/deployer/tests/config/lambda.yaml b/deployer/tests/config/lambda.yaml index e970290..13b230d 100644 --- 
a/deployer/tests/config/lambda.yaml +++ b/deployer/tests/config/lambda.yaml @@ -20,4 +20,4 @@ create: - tests/cloudformation/lambda - tests/lambda parameters: - Bucket: cloudtools-us-east-1 \ No newline at end of file + Bucket: cloudtools-us-east-1 diff --git a/deployer/tests/config/stackset.yaml b/deployer/tests/config/stackset.yaml index 1b69a96..bd0b8e7 100644 --- a/deployer/tests/config/stackset.yaml +++ b/deployer/tests/config/stackset.yaml @@ -22,6 +22,7 @@ create: - '356438515751' regions: - us-east-1 + - us-west-2 sync_dirs: - tests/cloudformation/stack parameters: @@ -51,4 +52,4 @@ update: Override: update tags: Local: update - Override: update \ No newline at end of file + Override: update diff --git a/deployer/tests/config/test.yaml b/deployer/tests/config/test.yaml index ec510a2..e8a79a0 100644 --- a/deployer/tests/config/test.yaml +++ b/deployer/tests/config/test.yaml @@ -46,4 +46,4 @@ update: OutputKey: Resource tags: Local: update - Override: update \ No newline at end of file + Override: update