From f9f2c4dcd68a248614e05b60a628566b1bc7ea43 Mon Sep 17 00:00:00 2001 From: "nada.jankovic" Date: Thu, 8 Jul 2021 19:39:38 -0400 Subject: [PATCH] Reformat and fix API request --- Dogmover/dogmover.py | 496 ++++++++++++++++++++++++++++++------------- 1 file changed, 354 insertions(+), 142 deletions(-) diff --git a/Dogmover/dogmover.py b/Dogmover/dogmover.py index 8a1f3d6..ab2591e 100755 --- a/Dogmover/dogmover.py +++ b/Dogmover/dogmover.py @@ -15,7 +15,7 @@ Run with --dry-run without making any changes to your Datadog account: dogmover.py pull dashboards --dry-run dogmover.py push dashboards --dry-run - + Supported arguments: dogmover.py pull|push dashboards|monitors|users|synthetics_api_tests|synthetics_browser_tests|awsaccounts|logpipelines|notebooks (--tag tag) (--dry-run|-h) @@ -28,6 +28,7 @@ """ __author__ = "Misiu Pajor " __version__ = "2.1.0" + from docopt import docopt import json import os @@ -35,6 +36,7 @@ import requests from datadog import initialize, api + def _init_options(action): config_file = "config.json" try: @@ -46,36 +48,40 @@ def _init_options(action): options = {} if action == "pull": options = { - 'api_key': config["source_api_key"], - 'app_key': config["source_app_key"], - 'api_host': config["source_api_host"] + "api_key": config["source_api_key"], + "app_key": config["source_app_key"], + "api_host": config["source_api_host"], } elif action == "push": - options = { - 'api_key': config["dest_api_key"], - 'app_key': config["dest_app_key"], - 'api_host': config["dest_api_host"] - } + options = { + "api_key": config["dest_api_key"], + "app_key": config["dest_app_key"], + "api_host": config["dest_api_host"], + } initialize(**options) return options + def _ensure_directory(directory): if not os.path.exists(directory): os.makedirs(directory) return directory + def _json_to_file(path, fileName, data): - filePathNameWExt = './' + path + '/' + fileName + '.json' + filePathNameWExt = "./" + path + "/" + fileName + ".json" _ensure_directory(path) - 
with open(filePathNameWExt, 'w') as fp: - json.dump(data, fp, sort_keys = True, indent = 4) + with open(filePathNameWExt, "w") as fp: + json.dump(data, fp, sort_keys=True, indent=4) return filePathNameWExt + def _files_to_json(type): - files = glob.glob('{}/*.json'.format(type)) + files = glob.glob("{}/*.json".format(type)) return files + def pull_dashboards(): path = False count = 0 @@ -85,32 +91,61 @@ def pull_dashboards(): count = count + 1 json_data = api.Dashboard.get(dashboard["id"]) if not arguments["--dry-run"]: - path = _json_to_file('dashboards', dashboard["id"], json_data) - print("Pulling dashboard: {} with id: {}, writing to file: {}".format(dashboard["title"].encode('utf8'), dashboard["id"], path)) + path = _json_to_file("dashboards", dashboard["id"], json_data) + print( + "Pulling dashboard: {} with id: {}, writing to file: {}".format( + dashboard["title"].encode("utf8"), dashboard["id"], path + ) + ) print("Retrieved '{}' dashboards.".format(count)) + def pull_monitors(tag): path = False count = 0 - good_keys = ['tags', 'deleted', 'query', 'message', 'matching_downtimes', 'multi', 'name', 'type', 'options', 'id'] + good_keys = [ + "tags", + "deleted", + "query", + "message", + "matching_downtimes", + "multi", + "name", + "type", + "options", + "id", + ] tags = [] if not tag else tag monitors = api.Monitor.get_all() for monitor in monitors: if monitor["type"] == "synthetics alert": - print("Skipping \"{}\" as this is a monitor belonging to a synthetic test. Synthetic monitors will be automatically re-created when you push synthetic tests.".format(monitor["name"].encode('utf8'))) - continue + print( + 'Skipping "{}" as this is a monitor belonging to a synthetic test. 
Synthetic monitors will be ' + 'automatically re-created when you push synthetic tests.'.format( + monitor["name"].encode("utf8") + ) + ) + continue all_tags_found = True for tag in tags: if not tag in monitor["tags"]: all_tags_found = False - print("Tag: {} not found in monitor: \"{}\" with tags {}".format(tag, monitor["name"].encode('utf8'), monitor["tags"])) + print( + 'Tag: {} not found in monitor: "{}" with tags {}'.format( + tag, monitor["name"].encode("utf8"), monitor["tags"] + ) + ) break - if all_tags_found == False: - print("Skipping \"{}\" because its tags do not match the filter.".format(monitor["name"].encode('utf8'))) + if not all_tags_found: + print( + 'Skipping "{}" because its tags do not match the filter.'.format( + monitor["name"].encode("utf8") + ) + ) - if all_tags_found == True: + if all_tags_found: count = count + 1 new_monitor = {} @@ -118,22 +153,31 @@ def pull_monitors(tag): if k in good_keys: new_monitor[k] = v if not arguments["--dry-run"]: - path = _json_to_file('monitors', str(new_monitor["id"]), new_monitor) - print("Pulling monitor: \"{}\" with id: {}, writing to file: {}".format(new_monitor["name"].encode('utf8'), new_monitor["id"], path)) + path = _json_to_file("monitors", str(new_monitor["id"]), new_monitor) + print( + 'Pulling monitor: "{}" with id: {}, writing to file: {}'.format( + new_monitor["name"].encode("utf8"), new_monitor["id"], path + ) + ) print("Retrieved '{}' monitors.".format(count)) + def pull_users(): path = False count = 0 users = api.User.get_all() for user in users["users"]: - if not user["disabled"]: # don't pull disabled users + if not user["disabled"]: # don't pull disabled users count = count + 1 json_data = api.User.get(user["handle"]) if not arguments["--dry-run"]: - path = _json_to_file('users', user["handle"], json_data["user"]) - print("Pulling user: {} with role: {}, writing to file: {}".format(user["handle"].encode('utf8'), user["access_role"], path)) + path = _json_to_file("users", user["handle"], 
json_data["user"]) + print( + "Pulling user: {} with role: {}, writing to file: {}".format( + user["handle"].encode("utf8"), user["access_role"], path + ) + ) print("Retrieved '{}' users.".format(count)) @@ -142,61 +186,106 @@ def pull_synthetics_api_tests(options, tag): count = 0 tags = [] if not tag else tag - r = requests.get('{}api/v1/synthetics/tests?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"])) + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"] + } + + r = requests.get( + "{}api/v1/synthetics/tests".format( + options["api_host"] + ), headers=headers + ) synthetics = r.json() for synthetic in synthetics["tests"]: if synthetic["type"] == "api": - all_tags_found="true" + all_tags_found = "true" for tag in tags: if not tag in synthetic["tags"]: - all_tags_found="false" - print("Tag: {} not found in synthetic: \"{}\" with tags {}".format(tag, synthetic["name"].encode('utf8'), synthetic["tags"])) + all_tags_found = "false" + print( + 'Tag: {} not found in synthetic: "{}" with tags {}'.format( + tag, synthetic["name"].encode("utf8"), synthetic["tags"] + ) + ) break if all_tags_found == "false": - print("Skipping \"{}\" because its tags do not match the filter.".format(synthetic["name"].encode('utf8'))) + print( + 'Skipping "{}" because its tags do not match the filter.'.format( + synthetic["name"].encode("utf8") + ) + ) if all_tags_found == "true": count = count + 1 - json_data = requests.get('{}api/v1/synthetics/tests/{}?api_key={}&application_key={}'.format( - options["api_host"], - synthetic["public_id"], - options["api_key"], - options["app_key"] - )).json() - path = _json_to_file('synthetics_api_tests', synthetic["public_id"], json_data) - print("Pulling: {} and writing to file: {}".format(synthetic["name"].encode('utf8'), path)) + json_data = requests.get( + "{}api/v1/synthetics/tests/{}".format( + options["api_host"], + synthetic["public_id"] + ), headers=headers + 
).json() + path = _json_to_file( + "synthetics_api_tests", synthetic["public_id"], json_data + ) + print( + "Pulling: {} and writing to file: {}".format( + synthetic["name"].encode("utf8"), path + ) + ) print("Retrieved '{}' synthetic tests.".format(count)) + def pull_synthetics_browser_tests(options, tag): path = False count = 0 tags = [] if not tag else tag - - r = requests.get('{}api/v1/synthetics/tests?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"])) + headers = { + "content-type": "application/json", + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + + r = requests.get( + "{}api/v1/synthetics/tests".format(options["api_host"]), headers=headers + ) synthetics = r.json() for synthetic in synthetics["tests"]: if synthetic["type"] == "browser": - all_tags_found="true" + all_tags_found = "true" for tag in tags: if not tag in synthetic["tags"]: - all_tags_found="false" - print("Tag: {} not found in synthetic: \"{}\" with tags {}".format(tag, synthetic["name"].encode('utf8'), synthetic["tags"])) + all_tags_found = "false" + print( + 'Tag: {} not found in synthetic: "{}" with tags {}'.format( + tag, synthetic["name"].encode("utf8"), synthetic["tags"] + ) + ) break if all_tags_found == "false": - print("Skipping \"{}\" because its tags do not match the filter.".format(synthetic["name"].encode('utf8'))) + print( + 'Skipping "{}" because its tags do not match the filter.'.format( + synthetic["name"].encode("utf8") + ) + ) if all_tags_found == "true": count = count + 1 - json_data = requests.get('{}api/v1/synthetics/tests/browser/{}?api_key={}&application_key={}'.format( - options["api_host"], - synthetic["public_id"], - options["api_key"], - options["app_key"] - )).json() - path = _json_to_file('synthetics_browser_tests', synthetic["public_id"], json_data) - print("Pulling: {} and writing to file: {}".format(synthetic["name"].encode('utf8'), path)) + json_data = requests.get( + 
"{}api/v1/synthetics/tests/browser/{}".format( + options["api_host"], synthetic["public_id"] + ), + headers=headers, + ).json() + path = _json_to_file( + "synthetics_browser_tests", synthetic["public_id"], json_data + ) + print( + "Pulling: {} and writing to file: {}".format( + synthetic["name"].encode("utf8"), path + ) + ) print("Retrieved '{}' synthetic tests.".format(count)) @@ -204,52 +293,83 @@ def pull_awsaccounts(options): path = False count = 0 - r = requests.get('{}api/v1/integration/aws?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"])) + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + + r = requests.get( + "{}api/v1/integration/aws".format(options["api_host"]), headers=headers + ) awsaccounts = r.json() for awsaccount in awsaccounts["accounts"]: count = count + 1 if not arguments["--dry-run"]: - path = _json_to_file('awsaccounts', awsaccount["account_id"], awsaccount) + path = _json_to_file("awsaccounts", awsaccount["account_id"], awsaccount) print("Retrieved '{}' AWS accounts.".format(count)) + def pull_logpipelines(options): path = False count = 0 - r = requests.get('{}api/v1/logs/config/pipelines?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"])) + headers = { + "content-type": "application/json", + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + + r = requests.get( + "{}api/v1/logs/config/pipelines".format(options["api_host"]), headers=headers + ) rJSON = r.json() for item in rJSON: count = count + 1 - path = _json_to_file('logpipelines', item["id"], item) + path = _json_to_file("logpipelines", item["id"], item) print("Retrieved '{}' log pipelines.".format(count)) + def pull_notebooks(options): path = False count = 0 - r = requests.get('{}api/v1/notebook?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"])) + headers = { + 
"DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + + r = requests.get("{}api/v1/notebook".format(options["api_host"]), headers=headers) notebooks = r.json() - if 'errors' in notebooks: # check if feature flag is enabled in this organisation - if 'You do not have permission' in notebooks["errors"][0]: - exit("Notebooks API (notebooks_api) feature flag is not enabled on this Datadog organisation. help@datadoghq.com for more information.") + if "errors" in notebooks: # check if feature flag is enabled in this organisation + if "You do not have permission" in notebooks["errors"][0]: + exit( + "Notebooks API (notebooks_api) feature flag is not enabled on this Datadog organisation. help@datadoghq.com for more information." + ) for notebook in notebooks["notebooks"]: count = count + 1 - path = _json_to_file('notebooks', str(notebook["id"]), notebook) + path = _json_to_file("notebooks", str(notebook["id"]), notebook) print("Retrieved '{}' notebooks.".format(count)) + def pull_slos(options): path = False + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + count = 0 - r = requests.get('{}api/v1/slo?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"])) + r = requests.get("{}api/v1/slo".format(options["api_host"]), headers=headers) slos = r.json() for slo in slos["data"]: count = count + 1 - path = _json_to_file('slos', str(slo["id"]), slo) + path = _json_to_file("slos", str(slo["id"]), slo) print("Retrieved '{}' SLOs.".format(count)) + def push_dashboards(): count = 0 dashboards = _files_to_json("dashboards") @@ -260,7 +380,7 @@ def push_dashboards(): with open(dashboard) as f: data = json.load(f) count = count + 1 - print("Pushing {}".format(data["title"].encode('utf8'))) + print("Pushing {}".format(data["title"].encode("utf8"))) if not arguments["--dry-run"]: api.Dashboard.create( title=data["title"], @@ -269,7 +389,7 @@ def push_dashboards(): 
template_variables=data["template_variables"], layout_type=data["layout_type"], notify_list=data["notify_list"], - is_read_only=data["is_read_only"] + is_read_only=data["is_read_only"], ) print("Pushed '{}' dashboards".format(count)) @@ -288,29 +408,34 @@ def push_monitors(): data = json.load(f) if not data["type"] == "composite": old_id = str(data["id"]) - print("Pushing monitors:", data["id"], data["name"].encode('utf8')) + print("Pushing monitors:", data["id"], data["name"].encode("utf8")) if not arguments["--dry-run"]: - result = api.Monitor.create(type=data['type'], - query=data['query'], - name=data['name'], - message=data['message'], - tags=data['tags'], - options=data['options']) - if 'errors' in result: - print('Error pushing monitor:',data["id"],json.dumps(result, indent=4, sort_keys=True)) - err_count=err_count+1 + result = api.Monitor.create( + type=data["type"], + query=data["query"], + name=data["name"], + message=data["message"], + tags=data["tags"], + options=data["options"], + ) + if "errors" in result: + print( + "Error pushing monitor:", + data["id"], + json.dumps(result, indent=4, sort_keys=True), + ) + err_count = err_count + 1 else: count = count + 1 - new_id= result['id'] + new_id = result["id"] api.Monitor.mute(new_id) - print("New monitor ", str(new_id)," has been muted.") + print("New monitor ", str(new_id), " has been muted.") ids[old_id] = str(new_id) else: # Fake new id for dry-run purpose ids[old_id] = old_id[:3] + "xxx" - # Second loop to import composite monitors for monitor in monitors: with open(monitor) as f: @@ -318,31 +443,48 @@ def push_monitors(): if data["type"] == "composite": new_query = data["query"] for old_id, new_id in ids.items(): - new_query=new_query.replace(old_id, new_id) - print("Pushing composite monitors:", data["id"], data["name"].encode('utf8')," with query ", new_query.encode('utf8')) + new_query = new_query.replace(old_id, new_id) + print( + "Pushing composite monitors:", + data["id"], + 
data["name"].encode("utf8"), + " with query ", + new_query.encode("utf8"), + ) if not arguments["--dry-run"]: - result = api.Monitor.create(type=data['type'], - query=new_query, - name=data['name'], - message=data['message'], - tags=data['tags'], - options=data['options']) - if 'errors' in result: - print('Error pushing monitor:',data["id"],json.dumps(result, indent=4, sort_keys=True)) - err_count=err_count+1 + result = api.Monitor.create( + type=data["type"], + query=new_query, + name=data["name"], + message=data["message"], + tags=data["tags"], + options=data["options"], + ) + if "errors" in result: + print( + "Error pushing monitor:", + data["id"], + json.dumps(result, indent=4, sort_keys=True), + ) + err_count = err_count + 1 else: count = count + 1 - new_id= result['id'] + new_id = result["id"] api.Monitor.mute(new_id) - print("New monitor ", str(new_id)," has been muted.") + print("New monitor ", str(new_id), " has been muted.") if count > 0: - print("Pushed '{}' monitors in muted status, navigate to Monitors -> Manage downtime to unmute.".format(count)) + print( + "Pushed '{}' monitors in muted status, navigate to Monitors -> Manage downtime to unmute.".format( + count + ) + ) if err_count > 0: print("Error pushing '{}' monitors, please check !".format(err_count)) + def push_users(): count = 0 users = _files_to_json("users") @@ -353,90 +495,136 @@ def push_users(): with open(user) as f: data = json.load(f) count = count + 1 - print("Pushing: {}".format(data["handle"].encode('utf8'))) + print("Pushing: {}".format(data["handle"].encode("utf8"))) if not arguments["--dry-run"]: api.User.create( handle=data["handle"], name=data["name"], - access_role=data["access_role"] + access_role=data["access_role"], ) print("Pushed '{}' users".format(count)) + def push_synthetics_api_tests(options): count = 0 synthetics = _files_to_json("synthetics_api_tests") if not synthetics: exit("No synthetic tests are locally available. 
Consider synthetics first.") + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + for synthetic in synthetics: - with open(synthetic) as f: + with open(synthetic) as f: data = json.load(f) count = count + 1 invalid_keys = ["public_id", "monitor_id"] list(map(data.pop, invalid_keys)) - print("Pushing {}".format(data["name"].encode('utf8'))) + print("Pushing {}".format(data["name"].encode("utf8"))) if not arguments["--dry-run"]: - r = requests.post('{}api/v1/synthetics/tests?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"]), json=data) + r = requests.post( + "{}api/v1/synthetics/tests".format(options["api_host"]), + headers=headers, + json=data, + ) print("Pushed '{}' synthetic tests.".format(count)) + def push_synthetics_browser_tests(options): count = 0 synthetics = _files_to_json("synthetics_browser_tests") if not synthetics: exit("No synthetic tests are locally available. Consider synthetics first.") + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + for synthetic in synthetics: - with open(synthetic) as f: + with open(synthetic) as f: data = json.load(f) count = count + 1 invalid_keys = ["public_id", "monitor_id"] list(map(data.pop, invalid_keys)) - print("Pushing {}".format(data["name"].encode('utf8'))) + print("Pushing {}".format(data["name"].encode("utf8"))) if not arguments["--dry-run"]: - r = requests.post('{}api/v1/synthetics/tests?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"]), json=data) + r = requests.post( + "{}api/v1/synthetics/tests".format(options["api_host"]), + headers=headers, + json=data, + ) print("Pushed '{}' synthetic tests.".format(count)) + def push_awsaccounts(options): count = 0 awsaccounts = _files_to_json("awsaccounts") if not awsaccounts: - exit("No awsaccounts are locally available. 
Consider pulling awsaccounts first.") + exit( + "No awsaccounts are locally available. Consider pulling awsaccounts first." + ) + + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } for awsaccount in awsaccounts: with open(awsaccount) as f: data = json.load(f) count = count + 1 - print("Pushing {}".format(data["account_id"].encode('utf8'))) + print("Pushing {}".format(data["account_id"].encode("utf8"))) if not arguments["--dry-run"]: - r = requests.post('{}api/v1/integration/aws?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"]), json=data) + r = requests.post( + "{}api/v1/integration/aws".format(options["api_host"]), + headers=headers, + json=data, + ) json_data = json.loads(r.text) json_data["account_id"] = data["account_id"] print(json.dumps(json_data)) - path = _json_to_file('awsaccounts.out', data["account_id"], json_data) + path = _json_to_file("awsaccounts.out", data["account_id"], json_data) print("Pushed '{}' AWS accounts.".format(count)) - print("You can now use the json files in the awsaccounts.out folder to automate the AWS External ID onboarding using AWS APIs.") + print( + "You can now use the json files in the awsaccounts.out folder to automate the AWS External ID onboarding using AWS APIs." + ) + def push_logpipelines(options): count = 0 fJSON = _files_to_json("logpipelines") if not fJSON: - exit("No logpipelines are locally available. Consider pulling logpipelines first.") + exit( + "No logpipelines are locally available. Consider pulling logpipelines first." 
+ ) + + headers = { + "content-type": "application/json", + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } for item in fJSON: with open(item) as f: data = json.load(f) count = count + 1 - print("Pushing {}".format(data["id"].encode('utf8'))) - itemId = data['id'] - del data['id'] - del data['is_read_only'] - del data['type'] - headers = {'content-type': 'application/json'} + print("Pushing {}".format(data["id"].encode("utf8"))) + itemId = data["id"] + del data["id"] + del data["is_read_only"] + del data["type"] if not arguments["--dry-run"]: - r = requests.post('{}api/v1/logs/config/pipelines?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"]), headers=headers, json=data) + r = requests.post( + "{}api/v1/logs/config/pipelines".format(options["api_host"]), + headers=headers, + json=data, + ) json_data = json.loads(r.text) json_data["id"] = itemId - path = _json_to_file('logpipelines.out', itemId, json_data) + path = _json_to_file("logpipelines.out", itemId, json_data) print("Pushed '{}' log pipelines.".format(count)) @@ -446,73 +634,97 @@ def push_notebooks(options): if not notebooks: exit("No notebooks are locally available. 
Consider pulling notebooks first.") + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + for notebook in notebooks: with open(notebook) as f: data = json.load(f) count = count + 1 - print("Pushing: {}".format(data["name"].encode('utf8'))) + print("Pushing: {}".format(data["name"].encode("utf8"))) if not arguments["--dry-run"]: - r = requests.post('{}api/v1/notebook?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"]), json=data) + r = requests.post( + "{}api/v1/notebook".format( + options["api_host"] + ), + headers=headers, + json=data, + ) print("Pushed '{}' notebooks".format(count)) + def push_slos(options): - count=0 + count = 0 slos = _files_to_json("slos") if not slos: exit("No SLOs are locally available. Consider pulling the SLOs first.") + headers = { + "DD-API-KEY": options["api_key"], + "DD-APPLICATION-KEY": options["app_key"], + } + for slo in slos: with open(slo) as f: data = json.load(f) count = count + 1 - print("Pushing: {}".format(data["name"].encode('utf-8'))) + print("Pushing: {}".format(data["name"].encode("utf-8"))) if not arguments["--dry-run"]: - r = requests.post('{}api/v1/slo?api_key={}&application_key={}'.format(options["api_host"], options["api_key"], options["app_key"]), json=data) + r = requests.post( + "{}api/v1/slo".format(options["api_host"]), + headers=headers, + json=data, + ) print("Pushed '{}' SLOs".format(count)) -if __name__ == '__main__': - arguments = docopt(__doc__, version='0.1.1rc') + +if __name__ == "__main__": + arguments = docopt(__doc__, version="0.1.1rc") if arguments["--dry-run"]: - print("You are running in dry-mode. No changes will be commmited to your Datadog account(s).") + print( + "You are running in dry-mode. No changes will be commmited to your Datadog account(s)." 
+ ) if arguments["pull"]: _init_options("pull") - if arguments[''] == 'dashboards': + if arguments[""] == "dashboards": pull_dashboards() - elif arguments[''] == 'monitors': + elif arguments[""] == "monitors": pull_monitors(arguments["--tag"]) - elif arguments[''] == 'users': + elif arguments[""] == "users": pull_users() - elif arguments[''] == 'synthetics_api_tests': + elif arguments[""] == "synthetics_api_tests": pull_synthetics_api_tests(_init_options("pull"), arguments["--tag"]) - elif arguments[''] == 'synthetics_browser_tests': + elif arguments[""] == "synthetics_browser_tests": pull_synthetics_browser_tests(_init_options("pull"), arguments["--tag"]) - elif arguments[''] == 'awsaccounts': + elif arguments[""] == "awsaccounts": pull_awsaccounts(_init_options("pull")) - elif arguments[''] == 'logpipelines': + elif arguments[""] == "logpipelines": pull_logpipelines(_init_options("pull")) - elif arguments[''] == 'notebooks': + elif arguments[""] == "notebooks": pull_notebooks(_init_options("pull")) - elif arguments[''] == 'slos': + elif arguments[""] == "slos": pull_slos(_init_options("pull")) elif arguments["push"]: _init_options("push") - if arguments[''] == 'dashboards': + if arguments[""] == "dashboards": push_dashboards() - elif arguments[''] == 'monitors': + elif arguments[""] == "monitors": push_monitors() - elif arguments[''] == 'users': + elif arguments[""] == "users": push_users() - elif arguments[''] == 'synthetics_api_tests': + elif arguments[""] == "synthetics_api_tests": push_synthetics_api_tests(_init_options("push")) - elif arguments[''] == 'synthetics_browser_tests': + elif arguments[""] == "synthetics_browser_tests": push_synthetics_browser_tests(_init_options("push")) - elif arguments[''] == 'awsaccounts': + elif arguments[""] == "awsaccounts": push_awsaccounts(_init_options("push")) - elif arguments[''] == 'logpipelines': + elif arguments[""] == "logpipelines": push_logpipelines(_init_options("push")) - elif arguments[''] == 'notebooks': + 
elif arguments[""] == "notebooks": push_notebooks(_init_options("push")) - elif arguments[''] == 'slos': + elif arguments[""] == "slos": push_slos(_init_options("push"))