diff --git a/.gitignore b/.gitignore index d927ce1..8e40d21 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ .env -*/__pycache__ \ No newline at end of file +*/__pycache__ +venv +*/venv +.vscode +*/.vscode \ No newline at end of file diff --git a/Makefile b/Makefile index b5a8b53..ee8dab3 100644 --- a/Makefile +++ b/Makefile @@ -42,4 +42,8 @@ worker-deploy: ecr-scan: @echo "make ecr-scan" docker-compose -f docker-compose.yml run --rm ecr-scan - \ No newline at end of file + +test: + @echo "Running tests" + python -m unittest src/*.py + diff --git a/src/app-spec.tpl.json b/src/app-spec.tpl.json deleted file mode 100755 index eb12352..0000000 --- a/src/app-spec.tpl.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "revisionType": "AppSpecContent", - "appSpecContent": { - "content": "{\"version\":1,\"Resources\":[{\"TargetService\":{\"Type\":\"AWS::ECS::Service\",\"Properties\":{\"TaskDefinition\":\"$TASK_ARN\",\"LoadBalancerInfo\":{\"ContainerName\":\"$APP_NAME\",\"ContainerPort\":$CONTAINER_PORT}$CAPACITY_PROVIDER_STRATEGY}}}]}" - } -} diff --git a/src/codedeploy.py b/src/codedeploy.py deleted file mode 100755 index 60cd875..0000000 --- a/src/codedeploy.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 - -import boto3 - -class DeployClient(object): - def __init__(self): - self.boto = boto3.client(u'codedeploy') - - def list_deployments(self, application_name, deployment_group, statuses=['InProgress', 'Ready']): - result = self.boto.list_deployments( - applicationName=application_name, - deploymentGroupName=deployment_group, - includeOnlyStatuses=statuses - ) - self.deployments = result['deployments'] - return result - - def create_deployment(self, application_name, deployment_config_name, deployment_group, revision): - result = self.boto.create_deployment( - applicationName=application_name, - deploymentGroupName=deployment_group, - deploymentConfigName=deployment_config_name, - description='Deployment', - revision=revision - ) - self.deploymentId = result['deploymentId'] - return result - - def continue_deployment(self, deployment_id): - return self.boto.continue_deployment( - deploymentId=deployment_id, - deploymentWaitType='READY_WAIT' - ) - - def get_deployment(self, deployment_id): - result = self.boto.get_deployment(deploymentId=deployment_id) - self.status = result['deploymentInfo']['status'] - return result - - def stop_deployment(self, deployment_id, auto_rollback=True): - return self.boto.stop_deployment( - deploymentId=deployment_id, - autoRollbackEnabled=auto_rollback - ) diff --git a/src/codedeploy_client.py b/src/codedeploy_client.py new file mode 100755 index 0000000..e0427d0 --- /dev/null +++ b/src/codedeploy_client.py @@ -0,0 +1,68 @@ +import os +import boto3 +import json + +from utils import json_template + + +class CodeDeployClient(object): + def __init__(self): + self.boto = boto3.client("codedeploy") + + def create_app_spec( + self, file_name: str, task_arn: str, capacity_provider_strategy=None + ): + env_vars = dict(os.environ) + env_vars["TASK_ARN"] = task_arn + env_vars["CAPACITY_PROVIDER_STRATEGY"] = "" + if capacity_provider_strategy: + env_vars["CAPACITY_PROVIDER_STRATEGY"] = ( + ',\\"CapacityProviderStrategy\\":[\\"%s\\"]' + % capacity_provider_strategy + ) + try: + app_spec_tpl = json_template(file_name, env_vars) + except Exception as err: + print("Error: Templating app spec :", err) + exit(1) + print("App spec file content: \n%s" % app_spec_tpl) + return json.loads(app_spec_tpl) + + def list_deployments( + self, application_name, deployment_group, 
statuses=["InProgress", "Ready"] + ): + result = self.boto.list_deployments( + applicationName=application_name, + deploymentGroupName=deployment_group, + includeOnlyStatuses=statuses, + ) + self.deployments = result["deployments"] + return result + + def create_deployment( + self, application_name, deployment_config_name, deployment_group, revision + ): + result = self.boto.create_deployment( + applicationName=application_name, + deploymentGroupName=deployment_group, + deploymentConfigName=deployment_config_name, + description="Deployment", + revision=revision, + ) + self.deploymentId = result["deploymentId"] + return result + + def continue_deployment(self, deployment_id): + return self.boto.continue_deployment( + deploymentId=deployment_id, deploymentWaitType="READY_WAIT" + ) + + def get_deployment(self, deployment_id): + result = self.boto.get_deployment(deploymentId=deployment_id) + self.status = result["deploymentInfo"]["status"] + return result + + def stop_deployment(self, deployment_id, auto_rollback=True): + return self.boto.stop_deployment( + deploymentId=deployment_id, autoRollbackEnabled=auto_rollback + ) diff --git a/src/codedeploy_client_test.py b/src/codedeploy_client_test.py new file mode 100644 index 0000000..bf66b4c --- /dev/null +++ b/src/codedeploy_client_test.py @@ -0,0 +1,38 @@ +import unittest +import json +import os + +from codedeploy_client import CodeDeployClient + + +class CodeDeployClientTest(unittest.TestCase): + def setUp(self): + os.environ["APP_NAME"] = "test-app" + os.environ["CONTAINER_PORT"] = "8080" + self.codedeploy_client = CodeDeployClient() + # self.deploy = Deploy() + + def test_create_app_spec_should_success_with_capacity_provider_strategy(self): + task_def_arn = "arn:aws:1234567890:asdf" + capacity_provider_strategy = "asdfasdf" + result = self.codedeploy_client.create_app_spec( + "../templates/app-spec.tpl.json", task_def_arn, capacity_provider_strategy + ) + with open("./tests/app-spec-with-capacity-provider-strategy.json", "r") as f: + expected_json = f.read() + expected = json.loads(expected_json) + self.assertEqual(result, expected) + + def test_create_app_spec_should_success_without_capacity_provider_strategy(self): + task_def_arn = "arn:aws:1234567890:asdf" + result = self.codedeploy_client.create_app_spec( + "../templates/app-spec.tpl.json", task_def_arn + ) + with open("./tests/app-spec-without-capacity-provider-strategy.json", "r") as f: + expected_json = f.read() + expected = json.loads(expected_json) + self.assertEqual(result, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/deploy.py b/src/deploy.py index 1862e2d..2df5a20 100755 --- a/src/deploy.py +++ b/src/deploy.py @@ -1,172 +1,199 @@ -#!/usr/bin/env python3 - import os import json import time -from ecs import EcsClient -from codedeploy import DeployClient +from ecs_client import EcsClient +from codedeploy_client import CodeDeployClient from utils import validate_envs, json_template -# ----- Check variables ----- -print('Step 1: Checking environment variables \n') - -req_vars = [ - 'CLUSTER_NAME', - 'APP_NAME', - 'AWS_DEFAULT_REGION' -] - -try: - validate_envs(req_vars) -except: - exit(1) - -cluster_name = os.getenv('CLUSTER_NAME') -app_name = os.getenv('APP_NAME') -aws_default_region = os.getenv('AWS_DEFAULT_REGION') -launchtype = os.getenv('SERVICE_TYPE') -subnets = os.getenv('SUBNETS') -security_groups = os.getenv('SECURITY_GROUPS') -task_def_file_name = os.getenv('TPL_FILE_NAME', 'task-definition.tpl.json') -app_spec_file_name = 
os.getenv('APPSPEC_FILE_NAME', 'app-spec.tpl.json') -capacity_provider_strategy = os.getenv('CAPACITY_PROVIDER_STRATEGY') - -# ----- Create task definition file ----- -print('Step 2: Replace variables inside of %s \n' % task_def_file_name) - -try: - task_definition = json_template(task_def_file_name) -except: - exit(1) - -print('Task definition file: \n%s' % task_definition) -task_def = json.loads(task_definition) - -# ----- Register task definition file ----- -print('Step 3: Registering task definition \n') -task = EcsClient() - -try: - task.register_task_definition(task_def) - print('Task definition arn: %s \n' % task.taskDefArn) -except Exception as err: - print('Register task definition issue: %s' % err) - exit(1) - -# ----- Code Deploy ----- -print('Step 4: Creating App Spec for CodeDeploy \n') - -env_vars = dict(os.environ) -env_vars['TASK_ARN'] = task.taskDefArn -env_vars['CAPACITY_PROVIDER_STRATEGY'] = '' -if capacity_provider_strategy: - env_vars['CAPACITY_PROVIDER_STRATEGY'] = ',\"CapacityProviderStrategy\":[\'%s\']' % capacity_provider_strategy - -try: - app_spec_tpl = json_template(app_spec_file_name, env_vars) -except: - exit(1) - -print('App spec file: \n%s' % app_spec_tpl) -app_spec = json.loads(app_spec_tpl) - -# ----- Create Deployment ----- -print('Step 5: Creating Deployment \n') -deploy = DeployClient() - -application_name = '-'.join([cluster_name, app_name]) -deployment_config_name = 'CodeDeployDefault.ECSAllAtOnce' -deployment_group = application_name - -try: - deploy.list_deployments(application_name, deployment_group) - if len(deploy.deployments) > 0: - raise Exception('Deployment in progress: https://%s.console.aws.amazon.com/codesuite/codedeploy/deployments/%s' % - (aws_default_region, deploy.deployments[0])) -except Exception as err: - print('Error: %s' % str(err)) - exit(1) - -try: - deploy.create_deployment( - application_name, deployment_config_name, deployment_group, app_spec) - print('Successfully created deployment: %s' % deploy.deploymentId) - print('For more info, you can follow your deployment at: https://%s.console.aws.amazon.com/codesuite/codedeploy/deployments/%s \n' % - (aws_default_region, deploy.deploymentId)) -except: - print('Deployment of application %s on deployment group %s failed' % - (application_name, deployment_group)) - exit(1) - -# ----- Monitor Deployment ----- -print('Step 6: Deployment Overview \n') - -print('Monitoring deployment %s for %s on deployment group %s' % (deploy.deploymentId, application_name, deployment_group)) - -while not hasattr(task, 'taskSetId'): - # set task.taskSetId - task.describe_services(cluster_name, app_name) - time.sleep(2) - -print('Task Set ID: %s \n' % task.taskSetId) - -print('Monitoring ECS service events for cluster %s on service %s:\n' % (cluster_name, app_name)) - -deploy_timeout_period = 0 -deploy_timeout = int(os.getenv('DEPLOYMENT_TIMEOUT', 900)) - -# deploy.status -deploy.get_deployment(deploy.deploymentId) - -def stop_deploy(deployment_id): - try: - deploy.stop_deployment(deployment_id) - print('Rollback deployment success') - except: - print('Rollback deployment failed') - finally: - exit(1) - -while deploy.status in ['Created', 'InProgress', 'Queued']: - # Tail logs from ECS service - ecs_events = task.tail_ecs_events(cluster_name, app_name) - for event in ecs_events: - print('%s %s' % ('{0:%Y-%m-%d %H:%M:%S %z}'.format(event['createdAt']), event['message'])) - - # Check if containers are being stoped - last_task = task.list_tasks(cluster_name, task.taskSetId) - if len(last_task['taskArns']) > 
2: - last_task_info = task.describe_tasks(cluster_name, last_task['taskArns']) - last_task_status = last_task_info['tasks'][0]['lastStatus'] - last_task_reason = last_task_info['tasks'][0]['stoppedReason'] - - if last_task_status == 'STOPPED': - print('Containers are being stoped: %s' % last_task_reason) - stop_deploy(deploy.deploymentId) - - # Rechead limit - if deploy_timeout_period >= deploy_timeout: - print('Deployment timeout: %s seconds' % deploy_timeout) - stop_deploy(deploy.deploymentId) - - # Get status, increment limit and sleep - deploy.get_deployment(deploy.deploymentId) - deploy_timeout_period += 2 - time.sleep(2) - -# Print Status -deployment_info = deploy.get_deployment(deploy.deploymentId) - -print() -if deploy.status == "Ready": - print('Deployment of application %s on deployment group %s ready and waiting for cutover' % (application_name, deployment_group)) - exit(0) - -if deploy.status == "Succeeded": - print('Deployment of application %s on deployment group %s succeeded' % (application_name, deployment_group)) - exit(0) - -if deployment_info.get('deploymentInfo', {}).get('errorInformation'): - print('Deployment failed: %s' % deployment_info.get('deploymentInfo', {}).get('errorInformation', {}).get('code')) - print('Error: %s' % deployment_info.get('deploymentInfo', {}).get('errorInformation', {}).get('message')) - exit(1) \ No newline at end of file + +class Deploy(object): + def __init__(self): + self.name = "Deploy" + self.required_vars = ["CLUSTER_NAME", "APP_NAME", "AWS_DEFAULT_REGION"] + self.debug = True + self.cluster_name = os.getenv("CLUSTER_NAME") + self.app_name = os.getenv("APP_NAME") + self.aws_default_region = os.getenv("AWS_DEFAULT_REGION") + self.launchtype = os.getenv("SERVICE_TYPE") + self.subnets = os.getenv("SUBNETS") + self.security_groups = os.getenv("SECURITY_GROUPS") + self.task_def_file_name = os.getenv("TPL_FILE_NAME", "task-definition.tpl.json") + self.app_spec_file_name = os.getenv("APPSPEC_FILE_NAME", "app-spec.tpl.json") + self.capacity_provider_strategy = os.getenv("CAPACITY_PROVIDER_STRATEGY") + + self.taskSetId = None + + self.ecs_client = EcsClient() + self.codedeploy_client = CodeDeployClient() + + def create_task_definition(self, file_name: str): + try: + task_definition = json_template(file_name) + except Exception as err: + print("Error: Templating task definition: ", err) + exit(1) + print("Task definition file: \n%s" % task_definition) + return json.loads(task_definition) + + def run(self): + print("Step 1: Validating environment variables \n") + try: + validate_envs(self.required_vars) + except Exception as err: + print("Error: Validating environment variables: ", err) + exit(1) + + print("Step 2: Replace variables inside of %s \n" % self.task_def_file_name) + task_def = self.create_task_definition(self.task_def_file_name) + + print("Step 3: Registering task definition \n") + try: + self.ecs_client.register_task_definition(task_def) + print("Task definition arn: %s \n" % self.ecs_client.taskDefArn) + except Exception as err: + print("Error: Register task definition: ", err) + exit(1) + + print("Step 4: Creating App Spec for CodeDeploy \n") + task_arn = self.ecs_client.taskDefArn + app_spec = self.codedeploy_client.create_app_spec( + self.app_spec_file_name, task_arn, self.capacity_provider_strategy + ) + + print("Step 5: Creating Deployment \n") + application_name = "-".join([self.cluster_name, self.app_name]) + deployment_config_name = "CodeDeployDefault.ECSAllAtOnce" + deployment_group = application_name + + try: + 
self.codedeploy_client.list_deployments(application_name, deployment_group)
+            if len(self.codedeploy_client.deployments) > 0:
+                raise Exception(
+                    "Deployment in progress: https://%s.console.aws.amazon.com/codesuite/codedeploy/deployments/%s"
+                    % (self.aws_default_region, self.codedeploy_client.deployments[0])
+                )
+        except Exception as err:
+            print("Error: Listing deployments failed: %s" % str(err))
+            exit(1)
+
+        try:
+            self.codedeploy_client.create_deployment(
+                application_name, deployment_config_name, deployment_group, app_spec
+            )
+            print(
+                "Successfully created deployment: %s"
+                % self.codedeploy_client.deploymentId
+            )
+            print(
+                "For more info, you can follow your deployment at: https://%s.console.aws.amazon.com/codesuite/codedeploy/deployments/%s \n"
+                % (self.aws_default_region, self.codedeploy_client.deploymentId)
+            )
+        except Exception as err:
+            print(
+                "Error: Deployment of application %s on deployment group %s failed: %s"
+                % (application_name, deployment_group, err)
+            )
+            exit(1)
+
+        print("Step 6: Deployment Overview \n")
+        print(
+            "Monitoring deployment %s for %s on deployment group %s"
+            % (self.codedeploy_client.deploymentId, application_name, deployment_group)
+        )
+        while not hasattr(self.ecs_client, "taskSetId"):
+            self.ecs_client.describe_service(self.cluster_name, self.app_name)
+            time.sleep(2)
+        print("Task Set ID: %s \n" % self.ecs_client.taskSetId)
+        print(
+            "Monitoring ECS service events for cluster %s on service %s:\n"
+            % (self.cluster_name, self.app_name)
+        )
+
+        deploy_timeout_period = 0
+        deploy_timeout = int(os.getenv("DEPLOYMENT_TIMEOUT", 900))
+
+        self.codedeploy_client.get_deployment(self.codedeploy_client.deploymentId)
+
+        def stop_deploy(deployment_id):
+            try:
+                self.codedeploy_client.stop_deployment(deployment_id)
+                print("Rollback deployment success")
+            except Exception:
+                print("Rollback deployment failed")
+            finally:
+                exit(1)
+
+        while self.codedeploy_client.status in ["Created", "InProgress", "Queued"]:
+            # Tail logs from ECS service
+            ecs_events = self.ecs_client.tail_ecs_events(
+                self.cluster_name, self.app_name
+            )
+            for event in ecs_events:
+                print(
+                    "%s %s"
+                    % (
+                        "{0:%Y-%m-%d %H:%M:%S %z}".format(event["createdAt"]),
+                        event["message"],
+                    )
+                )
+
+            # Check if containers are being stopped
+            last_task = self.ecs_client.list_tasks(
+                self.cluster_name, self.ecs_client.taskSetId
+            )
+            if len(last_task["taskArns"]) > 2:
+                last_task_info = self.ecs_client.describe_tasks(
+                    self.cluster_name, last_task["taskArns"]
+                )
+                last_task_status = last_task_info["tasks"][0]["lastStatus"]
+                last_task_reason = last_task_info["tasks"][0]["stoppedReason"]
+
+                if last_task_status == "STOPPED":
+                    print("Containers are being stopped: %s" % last_task_reason)
+                    stop_deploy(self.codedeploy_client.deploymentId)
+
+            # Reached timeout limit
+            if deploy_timeout_period >= deploy_timeout:
+                print("Deployment timeout: %s seconds" % deploy_timeout)
+                stop_deploy(self.codedeploy_client.deploymentId)
+
+            # Get status, increment limit and sleep
+            self.codedeploy_client.get_deployment(self.codedeploy_client.deploymentId)
+            deploy_timeout_period += 2
+            time.sleep(2)
+
+        deployment_info = self.codedeploy_client.get_deployment(
+            self.codedeploy_client.deploymentId
+        )
+        print()
+        if self.codedeploy_client.status == "Ready":
+            print(
+                "Deployment of application %s on deployment group %s ready and waiting for cutover"
+                % (application_name, deployment_group)
+            )
+            exit(0)
+
+        if self.codedeploy_client.status == "Succeeded":
+            print(
+                "Deployment of application %s on deployment group %s succeeded"
+                % (application_name, deployment_group)
+            )
+            exit(0)
+
+        if 
deployment_info.get("deploymentInfo", {}).get("errorInformation"): + print( + "Deployment failed: %s" + % deployment_info.get("deploymentInfo", {}) + .get("errorInformation", {}) + .get("code") + ) + print( + "Error: %s" + % deployment_info.get("deploymentInfo", {}) + .get("errorInformation", {}) + .get("message") + ) + exit(1) + + +if __name__ == "__main__": + deploy = Deploy() + deploy.run() diff --git a/src/deploy_test.py b/src/deploy_test.py new file mode 100644 index 0000000..a1f8b58 --- /dev/null +++ b/src/deploy_test.py @@ -0,0 +1,32 @@ +import unittest +import os +import json + +from deploy import Deploy + + +class DeployTest(unittest.TestCase): + def setUp(self): + os.environ["IMAGE_NAME"] = "dnxlabs/docker-ecs" + os.environ["DEFAULT_COMMAND"] = '"ls -la"' + os.environ["CPU"] = "1500" + os.environ["MEMORY"] = "3000" + os.environ["APP_NAME"] = "test-app" + os.environ["CONTAINER_PORT"] = "8080" + os.environ["CLUSTER_NAME"] = "test-cluster" + os.environ["AWS_DEFAULT_REGION"] = "ap-southeast-2" + os.environ["AWS_ACCOUNT_ID"] = "1234567890" + self.deploy = Deploy() + + def test_create_task_definition(self): + result = self.deploy.create_task_definition( + "../templates/task-definition.tpl-default.json" + ) + with open("./tests/task-definition-default.json", "r") as f: + expected_json = f.read() + expected = json.loads(expected_json) + self.assertEqual(result, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/ecs.py b/src/ecs.py deleted file mode 100755 index 3381ca2..0000000 --- a/src/ecs.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python3 - -import boto3 - -LAUNCH_TYPE_FARGATE = 'FARGATE' - -class EcsClient(object): - def __init__(self): - self.boto = boto3.client('ecs') - self.logs = boto3.client('logs') - self._last_event = None - self._log_next_token = None - - def update_service(self, cluster_name, app_name, task_definition, force_deployment=False): - return self.boto.update_service( - cluster=cluster_name, - service=app_name, - taskDefinition=task_definition, - forceNewDeployment=force_deployment - ) - - def describe_services(self, cluster_name, app_name): - result = self.boto.describe_services( - cluster=cluster_name, - services=[app_name] - ) - - if 'taskSets' in result['services'][0]: - for taskSet in result['services'][0]['taskSets']: - if taskSet['status'] == 'ACTIVE': - self.taskSetId = taskSet['id'] - - if 'deployments' in result['services'][0]: - for deployment in result['services'][0]['deployments']: - if deployment['status'] == 'PRIMARY': - self.ecsDeployId = deployment['id'] - - return result - - def register_task_definition(self, task_definition): - result = self.boto.register_task_definition( - **task_definition - ) - self.taskDefArn = result['taskDefinition']['taskDefinitionArn'] - return result - - def describe_task_definition(self, task_definition): - result = self.boto.describe_task_definition( - taskDefinition=task_definition - ) - self.taskDefArn = result['taskDefinition']['taskDefinitionArn'] - return result - - def list_tasks(self, cluster_name, started_by, desired_status='STOPPED'): - return self.boto.list_tasks( - cluster=cluster_name, - startedBy=started_by, - desiredStatus=desired_status - ) - - def describe_tasks(self, cluster_name, task_arns): - result = self.boto.describe_tasks(cluster=cluster_name, tasks=task_arns) - self.status = result['tasks'][0]['lastStatus'] - return result - - def run_task(self, cluster_name, task_definition, launchtype, subnets, security_groups): - if launchtype == LAUNCH_TYPE_FARGATE: - if not 
subnets or not security_groups:
-                msg = 'At least one subnet and one security ' \
-                      'group definition are required ' \
-                      'for launch type FARGATE'
-                raise Exception(msg)
-
-            network_configuration = {
-                "awsvpcConfiguration": {
-                    "subnets": subnets,
-                    "securityGroups": security_groups,
-                    "assignPublicIp": "DISABLED"
-                }
-            }
-
-            result = self.boto.run_task(
-                cluster=cluster_name,
-                taskDefinition=task_definition,
-                launchType=launchtype,
-                networkConfiguration=network_configuration
-            )
-
-        else:
-            result = self.boto.run_task(
-                cluster=cluster_name,
-                taskDefinition=task_definition
-            )
-
-        self.taskArn = result['tasks'][0]['taskArn']
-        self.taskId = self.taskArn.split('/')[-1]
-        self.status = result['tasks'][0]['lastStatus']
-        return result
-
-    def describe_log_streams(self, log_group_name):
-        return self.logs.describe_log_streams(
-            logGroupName=log_group_name, orderBy='LastEventTime', descending=True, limit=1)
-
-    def get_log_events(self, log_args):
-        return self.logs.get_log_events(**log_args)
-
-    def tail_log_events(self, log_group_name, log_stream_name):
-        log_args = {
-            'logGroupName': log_group_name,
-            'logStreamName': log_stream_name,
-            'startFromHead': True
-        }
-
-        if self._log_next_token:
-            log_args['nextToken'] = self._log_next_token
-
-        log_stream_events = self.get_log_events(log_args)
-
-        self._log_next_token = log_stream_events['nextForwardToken']
-        return log_stream_events['events']
-
-    def tail_ecs_events(self, cluster_name, app_name):
-        get_events = self.describe_services(cluster_name, app_name)
-        events = get_events['services'][0]['events']
-        events_collected = []
-
-        for event in events:
-            if not self._last_event or event['id'] == self._last_event:
-                break
-            events_collected.insert(0, event)
-
-        self._last_event = events[0]['id']
-        return events_collected
-
\ No newline at end of file
diff --git a/src/ecs_client.py b/src/ecs_client.py
new file mode 100755
index 0000000..6c37015
--- /dev/null
+++ b/src/ecs_client.py
@@ -0,0 +1,144 @@
+import boto3
+
+LAUNCH_TYPE_FARGATE = "FARGATE"
+
+
+class EcsClient(object):
+    def __init__(self):
+        self.boto = boto3.client("ecs")
+        self.logs = boto3.client("logs")
+        self._last_event = None
+        self._log_next_token = None
+
+    def update_service(
+        self, cluster_name: str, app_name: str, task_definition, force_deployment=False
+    ):
+        return self.boto.update_service(
+            cluster=cluster_name,
+            service=app_name,
+            taskDefinition=task_definition,
+            forceNewDeployment=force_deployment,
+        )
+
+    def get_deployment_id(self, cluster_name: str, service_name: str):
+        result = self.boto.describe_services(cluster=cluster_name, services=[service_name])
+        if "deployments" in result["services"][0]:
+            for deployment in result["services"][0]["deployments"]:
+                if deployment["status"] == "PRIMARY":
+                    return deployment["id"]
+        return None
+
+    def get_taskset_id(self, cluster_name: str, service_name: str):
+        result = self.boto.describe_services(cluster=cluster_name, services=[service_name])
+        if "taskSets" in result["services"][0]:
+            for taskSet in result["services"][0]["taskSets"]:
+                if taskSet["status"] == "ACTIVE":
+                    return taskSet["id"]
+        return None
+
+    def get_service(self, cluster_name: str, service_name: str):
+        return self.boto.describe_services(cluster=cluster_name, services=[service_name])
+
+    # TO-DO: This function should be 3 different functions: setTaskSetId, setEcsDeployId and describeService
+    def describe_service(self, cluster_name, app_name):
+        result = self.boto.describe_services(cluster=cluster_name, services=[app_name])
+        if "taskSets" in result["services"][0]:
+            for taskSet in result["services"][0]["taskSets"]:
+                if taskSet["status"] == "ACTIVE":
+                    self.taskSetId = taskSet["id"]
+        if "deployments" in result["services"][0]:
+            for deployment in result["services"][0]["deployments"]:
+                if deployment["status"] == "PRIMARY":
+                    self.ecsDeployId = deployment["id"]
+        return result
+
+    # TO-DO: Functions should be pure (this one is returning and setting at the same time)
+    def register_task_definition(self, task_definition):
+        result = self.boto.register_task_definition(**task_definition)
+        self.taskDefArn = result["taskDefinition"]["taskDefinitionArn"]
+        return result
+
+    def describe_task_definition(self, task_definition):
+        result = self.boto.describe_task_definition(taskDefinition=task_definition)
+        self.taskDefArn = result["taskDefinition"]["taskDefinitionArn"]
+        return result
+
+    def list_tasks(self, cluster_name: str, started_by, desired_status="STOPPED"):
+        return self.boto.list_tasks(
+            cluster=cluster_name, startedBy=started_by, desiredStatus=desired_status
+        )
+
+    def describe_tasks(self, cluster_name: str, task_arns):
+        result = self.boto.describe_tasks(cluster=cluster_name, tasks=task_arns)
+        self.status = result["tasks"][0]["lastStatus"]
+        return result
+
+    def run_task(
+        self, cluster_name: str, task_definition, launchtype: str, subnets, security_groups
+    ):
+        if launchtype == LAUNCH_TYPE_FARGATE:
+            if not subnets or not security_groups:
+                msg = (
+                    "At least one subnet and one security "
+                    "group definition are required "
+                    "for launch type FARGATE"
+                )
+                raise Exception(msg)
+
+            network_configuration = {
+                "awsvpcConfiguration": {
+                    "subnets": subnets,
+                    "securityGroups": security_groups,
+                    "assignPublicIp": "DISABLED",
+                }
+            }
+            result = self.boto.run_task(
+                cluster=cluster_name,
+                taskDefinition=task_definition,
+                launchType=launchtype,
+                networkConfiguration=network_configuration,
+            )
+        else:
+            result = self.boto.run_task(
+                cluster=cluster_name, taskDefinition=task_definition
+            )
+        self.taskArn = result["tasks"][0]["taskArn"]
+        self.taskId = self.taskArn.split("/")[-1]
+        self.status = result["tasks"][0]["lastStatus"]
+        return result
+
+    def describe_log_streams(self, log_group_name):
+        return self.logs.describe_log_streams(
+            logGroupName=log_group_name,
+            orderBy="LastEventTime",
+            descending=True,
+            limit=1,
+        )
+
+    def get_log_events(self, log_args):
+        return self.logs.get_log_events(**log_args)
+
+    def tail_log_events(self, log_group_name, log_stream_name):
+        log_args = {
+            "logGroupName": log_group_name,
+            "logStreamName": log_stream_name,
+            "startFromHead": True,
+        }
+        if self._log_next_token:
+            log_args["nextToken"] = self._log_next_token
+        log_stream_events = self.get_log_events(log_args)
+        self._log_next_token = log_stream_events["nextForwardToken"]
+        return log_stream_events["events"]
+
+    def tail_ecs_events(self, cluster_name, app_name):
+        get_events = self.describe_service(cluster_name, app_name)
+        events = get_events["services"][0]["events"]
+        events_collected = []
+
+        for event in events:
+            if not self._last_event or event["id"] == self._last_event:
+                break
+            events_collected.insert(0, event)
+
+        self._last_event = events[0]["id"]
+        return events_collected
diff --git a/src/ecs_client_test.py b/src/ecs_client_test.py
new file mode 100644
index 0000000..3da1cdd
--- /dev/null
+++ b/src/ecs_client_test.py
@@ -0,0 +1,71 @@
+import unittest
+import boto3
+import os
+from moto import mock_aws
+from ecs_client import EcsClient
+
+
+@mock_aws
+class EcsClientTest(unittest.TestCase):
+    def 
setUp(self): + os.environ["MOTO_ECS_SERVICE_RUNNING"] = "3" + os.environ["AWS_ACCESS_KEY_ID"] = "testing" + os.environ["AWS_SECRET_ACCESS_KEY"] = "testing" + os.environ["AWS_SECURITY_TOKEN"] = "testing" + os.environ["AWS_SESSION_TOKEN"] = "testing" + os.environ["AWS_DEFAULT_REGION"] = "us-east-1" + + self.cluster_name = "test-cluster" + self.app_name = "test-app" + ecs_client = boto3.client("ecs") + ecs_client.create_cluster(clusterName=self.cluster_name) + ecs_client.register_task_definition( + family=self.app_name, + containerDefinitions=[ + { + "name": "task", + "image": "123456789012.dkr.ecr.us-west-2.amazonaws.com/derp:live", + "cpu": 1024, + "memory": 2048, + "memoryReservation": 2048, + "portMappings": [ + {"containerPort": 8000, "hostPort": 8000, "protocol": "tcp"} + ], + "essential": True, + "mountPoints": [], + "volumesFrom": [], + "linuxParameters": {"initProcessEnabled": True}, + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + ecs_client.create_service( + cluster=self.cluster_name, + serviceName=self.app_name, + taskDefinition=self.app_name, + desiredCount=1, + ) + + self.client = EcsClient() + + def test_describe_service(self): + result = self.client.describe_service(self.cluster_name, self.app_name) + self.assertEqual(len(result["services"]), 1) + self.assertEqual( + result["services"][0]["serviceArn"], + "arn:aws:ecs:us-east-1:123456789012:service/test-cluster/test-app", + ) + self.assertEqual(result["services"][0]["serviceName"], self.app_name) + self.assertEqual( + result["services"][0]["clusterArn"], + "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster", + ) + + def test_get_deployment_id(self): + result = self.client.get_deployment_id(self.cluster_name, self.app_name) + expected = self.client.get_service(self.cluster_name, self.app_name) + self.assertEqual(result, expected["services"][0]["deployments"][0]["id"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/requirements.txt b/src/requirements.txt new file mode 100644 index 0000000..91b95d9 --- /dev/null +++ b/src/requirements.txt @@ -0,0 +1,2 @@ +boto3 +moto diff --git a/src/tests/app-spec-with-capacity-provider-strategy.json b/src/tests/app-spec-with-capacity-provider-strategy.json new file mode 100644 index 0000000..8af6e5d --- /dev/null +++ b/src/tests/app-spec-with-capacity-provider-strategy.json @@ -0,0 +1,6 @@ +{ + "revisionType": "AppSpecContent", + "appSpecContent": { + "content": "{ \"version\":1, \"Resources\": [ { \"TargetService\": { \"Type\": \"AWS::ECS::Service\", \"Properties\": { \"TaskDefinition\":\"arn:aws:1234567890:asdf\", \"LoadBalancerInfo\": { \"ContainerName\": \"test-app\", \"ContainerPort\": 8080 } ,\"CapacityProviderStrategy\":[\"asdfasdf\"] } } } ] }" + } +} diff --git a/src/tests/app-spec-without-capacity-provider-strategy.json b/src/tests/app-spec-without-capacity-provider-strategy.json new file mode 100644 index 0000000..2d79d7e --- /dev/null +++ b/src/tests/app-spec-without-capacity-provider-strategy.json @@ -0,0 +1,6 @@ +{ + "revisionType": "AppSpecContent", + "appSpecContent": { + "content": "{ \"version\":1, \"Resources\": [ { \"TargetService\": { \"Type\": \"AWS::ECS::Service\", \"Properties\": { \"TaskDefinition\":\"arn:aws:1234567890:asdf\", \"LoadBalancerInfo\": { \"ContainerName\": \"test-app\", \"ContainerPort\": 8080 } } } } ] }" + } +} diff --git a/src/tests/task-definition-default.json b/src/tests/task-definition-default.json new file mode 100644 index 0000000..a6da908 --- /dev/null +++ b/src/tests/task-definition-default.json @@ -0,0 
+1,32 @@ +{ + "containerDefinitions": [ + { + "essential": true, + "image": "dnxlabs/docker-ecs", + "command": "ls -la", + "cpu": 1500, + "memory": 3000, + "memoryReservation": 3000, + "name": "test-app", + "portMappings": [ + { + "containerPort": 8080 + } + ], + "environment": [], + "mountPoints": [], + "volumesFrom": [], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "/ecs/test-cluster/test-app", + "awslogs-region": "ap-southeast-2", + "awslogs-stream-prefix": "test-app" + } + } + } + ], + "family": "test-cluster-test-app", + "executionRoleArn": "arn:aws:iam::1234567890:role/ecs-task-test-cluster-ap-southeast-2", + "taskRoleArn": "arn:aws:iam::1234567890:role/ecs-task-test-cluster-ap-southeast-2" +} diff --git a/src/utils.py b/src/utils.py index baaae78..2810116 100644 --- a/src/utils.py +++ b/src/utils.py @@ -16,24 +16,21 @@ def validate_json(json_data): json.loads(json_data) return True except ValueError as err: - print('JSON not valide: %s' % err) + print('JSON not valid: %s' % err) def json_template(json_template, env_vars=os.environ): - try: - json_file = open(json_template) + with open(json_template, 'r') as json_file: data = json_file.read() - except: - print('File %s not found' % json_template) - try: - template = Template(data).substitute(env_vars) - except KeyError as err: - print('Missing variable %s' % str(err)) - exit(1) + try: + template = Template(data).substitute(env_vars) + except KeyError as err: + print('Missing variable %s' % str(err)) + exit(1) - try: - validate_json(template) - except Exception as err: - print(err) - - return template + try: + validate_json(template) + except Exception as err: + print(err) + + return template diff --git a/templates/app-spec.tpl.json b/templates/app-spec.tpl.json new file mode 100755 index 0000000..cc51325 --- /dev/null +++ b/templates/app-spec.tpl.json @@ -0,0 +1,6 @@ +{ + "revisionType": "AppSpecContent", + "appSpecContent": { + "content": "{ \"version\":1, \"Resources\": [ { \"TargetService\": { \"Type\": \"AWS::ECS::Service\", \"Properties\": { \"TaskDefinition\":\"$TASK_ARN\", \"LoadBalancerInfo\": { \"ContainerName\": \"$APP_NAME\", \"ContainerPort\": $CONTAINER_PORT } $CAPACITY_PROVIDER_STRATEGY } } } ] }" + } +}