diff --git a/src/Makefile b/src/Makefile index 5c92baef..7aca9b1c 100644 --- a/src/Makefile +++ b/src/Makefile @@ -9,9 +9,9 @@ rpm: mkdir -p ~/rpmbuild/SOURCES mkdir -p ~/rpmbuild/SPECS mkdir -p ~/rpmbuild/SRPMS - cp contrail_smgr.spec ~/rpmbuild/SPECS - rpmbuild -bb ~/rpmbuild/SPECS/contrail_smgr.spec - cp ~/rpmbuild/RPMS/noarch/contrail_smgr-1.0-1.noarch.rpm ~/rpmbuild/RPMS/noarch/contrail_smgr-1.0-1_$(commit_id).noarch.rpm + cp contrail-server-manager.spec ~/rpmbuild/SPECS + rpmbuild -bb ~/rpmbuild/SPECS/contrail-server-manager.spec + cp ~/rpmbuild/RPMS/noarch/contrail-server-manager-1.0-1.noarch.rpm ~/rpmbuild/RPMS/noarch/contrail-server-manager-1.0-1_$(commit_id).noarch.rpm deb: (cd debian-contrail-smgr; fakeroot debian/rules binary) @@ -25,8 +25,8 @@ client-rpm: mkdir -p ~/rpmbuild/SOURCES mkdir -p ~/rpmbuild/SPECS mkdir -p ~/rpmbuild/SRPMS - cp contrail_smgr_client.spec ~/rpmbuild/SPECS - rpmbuild -bb ~/rpmbuild/SPECS/contrail_smgr_client.spec + cp contrail-server-manager-client.spec ~/rpmbuild/SPECS + rpmbuild -bb ~/rpmbuild/SPECS/contrail-server-manager-client.spec client-deb: (cd debian-contrail-smgr-client; fakeroot debian/rules binary) diff --git a/src/client/cluster-5-node-interface.json b/src/client/cluster-5-node-interface.json new file mode 100644 index 00000000..b421a8d5 --- /dev/null +++ b/src/client/cluster-5-node-interface.json @@ -0,0 +1,27 @@ +{ + "cluster" : [ + { + "id" : "5-node", + "parameters" : { + "router_asn": "64512", + "database_dir": "/home/cassandra", + "database_token": "", + "use_certificates": "False", + "multi_tenancy": "False", + "encapsulation_priority": "MPLSoUDP,MPLSoGRE,VXLAN", + "service_token": "contrail123", + "keystone_username": "admin", + "keystone_password": "contrail123", + "keystone_tenant": "admin", + "analytics_data_ttl": "168", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.62", + "password": "c0ntrail123", + "haproxy": "disable", + "external_bgp": "", + "domain": "englab.juniper.net" + } + } + ] 
+} + diff --git a/src/client/cluster.json b/src/client/cluster.json index ec24a0be..68e3c0a0 100644 --- a/src/client/cluster.json +++ b/src/client/cluster.json @@ -1,7 +1,26 @@ { - "cluster": [ - { - "cluster_id": "demo-cluster" + "cluster" : [ + { + "id" : "demo-cluster", + "parameters" : { + "router_asn": "64512", + "database_dir": "/home/cassandra", + "database_token": "", + "use_certificates": "False", + "multi_tenancy": "False", + "encapsulation_priority": "MPLSoUDP,MPLSoGRE,VXLAN", + "service_token": "contrail123", + "keystone_username": "admin", + "keystone_password": "contrail123", + "keystone_tenant": "admin", + "analytics_data_ttl": "168", + "haproxy": "disable", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.46", + "password": "c0ntrail123", + "external_bgp": "", + "domain": "englab.juniper.net" + } } - ] -} + ] +} \ No newline at end of file diff --git a/src/client/cluster_openstack_ha.json b/src/client/cluster_openstack_ha.json new file mode 100644 index 00000000..71f8e6ec --- /dev/null +++ b/src/client/cluster_openstack_ha.json @@ -0,0 +1,37 @@ +{ + "cluster": [ + { + "email": "tsurendra@juniper.net", + "id": "cluster1", + "parameters": { + "analytics_data_ttl": "168", + "database_dir": "/home/cassandra", + "database_token": "", + "domain": "contrail.juniper.net", + "encapsulation_priority": "MPLSoUDP,MPLSoGRE,VXLAN", + "external_bgp": "", + "gateway": "10.84.17.254", + "ha": "True", + "haproxy": "disable", + "keystone_password": "contrail123", + "keystone_tenant": "admin", + "keystone_username": "admin", + "nfs_path": "/", + "multi_tenancy": "False", + "openstack_mgmt_ip": "", + "openstack_passwd": "contrail123", + "password": "c0ntrail123", + "router_asn": "64512", + "service_token": "contrail123", + "storage_fsid": "00da01de-a6c2-4293-9fc9-85e39d6ea92d", + "storage_virsh_uuid": "69cc6a31-805a-4cc5-9b6b-0cbfefe1ba6a", + "subnet_mask": "255.255.255.0", + "use_certificates": "False", + "nfs_server": "", + "internal_vip": "10.84.17.23", + 
"uuid": "f4def0e9-7068-41c0-aafd-e70be63f4262" + } + } + ] +} + diff --git a/src/client/image.json b/src/client/image.json index 628254fa..79a525e3 100644 --- a/src/client/image.json +++ b/src/client/image.json @@ -1,22 +1,22 @@ { "image": [ { - "image_id": "ubuntu-12.04.3", - "image_type": "ubuntu", - "image_version": "ubuntu-12.04.3", - "image_path": "/iso/ubuntu-12.04.3-server-amd64.iso" + "id": "ubuntu-12.04.3", + "type": "ubuntu", + "version": "ubuntu-12.04.3", + "path": "/iso/ubuntu-12.04.3-server-amd64.iso" }, { - "image_id": "centos-6.4", - "image_type": "centos", - "image_version": "centos-6.4", - "image_path": "/iso/CentOS-6.4-x86_64-minimal.iso" + "id": "centos-6.4", + "type": "centos", + "version": "centos-6.4", + "path": "/iso/CentOS-6.4-x86_64-minimal.iso" }, { - "image_id": "contrail-ubuntu-150", - "image_type": "contrail-ubuntu-package", - "image_version": "contrail-ubuntu-150", - "image_path": "/iso/contrail-install-packages_1.03-150_all.deb" + "id": "contrail-ubuntu-150", + "type": "contrail-ubuntu-package", + "version": "contrail-ubuntu-150", + "path": "/iso/contrail-install-packages_1.03-150_all.deb" } ] } diff --git a/src/client/server-5-node-interface.json b/src/client/server-5-node-interface.json index aeaf1f3d..021a8b9b 100644 --- a/src/client/server-5-node-interface.json +++ b/src/client/server-5-node-interface.json @@ -1,137 +1,122 @@ { "server": [ { - "server_id": "nodec57", - "mac": "00:25:90:c5:58:6e", - "ip": "10.204.221.61", - "server_params" : { - "ifname": "eth0", - "compute_non_mgmt_ip": "", + "id": "nodec57", + "mac_address": "00:25:90:c5:58:6e", + "ip_address": "10.204.221.61", + "parameters" : { + "interface_name": "eth0", "setup_interface": "Yes", - "compute_non_mgmt_gway": "" }, "roles" : ["compute"], "cluster_id": "5-node", - "vns_id": "5-node", - "mask": "255.255.255.240", - "gway": "10.204.221.62", - "passwd": "c0ntrail123", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.62", + "password": "c0ntrail123", 
"domain": "englab.juniper.net", - "control": { + "control_data_network": { "eth1": { - "ip": "192.168.100.1/24", - "gw": "" + "ip_address": "192.168.100.1/24", + "gateway": "" } }, - "power_address": "10.207.25.151" + "ipmi_address": "10.207.25.151" }, { - "server_id": "nodea4", - "mac": "00:25:90:a5:3b:12", - "ip": "10.204.221.60", - "server_params" : { - "ifname": "eth5", - "compute_non_mgmt_ip": "", + "id": "nodea4", + "mac_address": "00:25:90:a5:3b:12", + "ip_address": "10.204.221.60", + "parameters" : { + "interface_name": "eth5", "setup_interface": "Yes", - "compute_non_mgmt_gway": "" }, "roles" : ["compute"], "cluster_id": "5-node", - "vns_id": "5-node", - "mask": "255.255.255.240", - "gway": "10.204.221.62", - "passwd": "c0ntrail123", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.62", + "password": "c0ntrail123", "domain": "englab.juniper.net", - "control": { + "control_data_network": { "bond0": { - "ip": "192.168.100.2/24", - "gw": "" + "ip_address": "192.168.100.2/24", + "gateway": "" } }, - "bond": { + "bond_interface": { "bond0": { "bond_options": "{'miimon': '100', 'mode': '802.3ad','xmit_hash_policy': 'layer3+4'}", - "member": "['eth0', 'eth1', 'eth2', 'eth3']" + "member_interfaces": "['eth0', 'eth1', 'eth2', 'eth3']" } }, - "power_address": "10.207.25.17" + "ipmi_address": "10.207.25.17" }, { - "server_id": "nodec33", - "mac": "00:25:90:c4:82:28", - "ip": "10.204.221.59", - "server_params" : { - "ifname": "eth0", + "id": "nodec33", + "mac_address": "00:25:90:c4:82:28", + "ip_address": "10.204.221.59", + "parameters" : { + "interface_name": "eth0", "setup_interface": "Yes", - "compute_non_mgmt_ip": "", - "compute_non_mgmt_gway": "" }, "roles" : ["config","collector","webui","database"], "cluster_id": "5-node", - "vns_id": "5-node", - "mask": "255.255.255.240", - "gway": "10.204.221.62", - "passwd": "c0ntrail123", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.62", + "password": "c0ntrail123", "domain": 
"englab.juniper.net", "control": { "eth1": { - "ip": "192.168.100.3/24", - "gw": "" + "ip_address": "192.168.100.3/24", + "gateway": "" } }, - "power_address": "10.207.25.87" + "ipmi_address": "10.207.25.87" }, { - "server_id": "nodec35", - "mac": "00:25:90:c4:7a:70", - "ip": "10.204.221.58", - "server_params" : { - "ifname": "eth0", + "id": "nodec35", + "mac_address": "00:25:90:c4:7a:70", + "ip_address": "10.204.221.58", + "parameters" : { + "interface_name": "eth0", "setup_interface": "Yes", - "compute_non_mgmt_ip": "", - "compute_non_mgmt_gway": "" }, "roles" : ["config","openstack","control"], "cluster_id": "5-node", - "vns_id": "5-node", - "mask": "255.255.255.240", - "gway": "10.204.221.62", - "passwd": "c0ntrail123", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.62", + "password": "c0ntrail123", "domain": "englab.juniper.net", "control": { "eth1": { - "ip": "192.168.100.4/24", - "gw": "" + "ip_address": "192.168.100.4/24", + "gateway": "" } }, - "power_address": "10.207.25.89" + "ipmi_address": "10.207.25.89" }, { - "server_id": "nodec60", - "mac": "00:25:90:c5:59:B0", - "ip": "10.204.221.57", - "server_params" : { - "ifname": "eth0", + "id": "nodec60", + "mac_address": "00:25:90:c5:59:B0", + "ip_address": "10.204.221.57", + "parameters" : { + "interface_name": "eth0", "setup_interface": "Yes", - "compute_non_mgmt_ip": "", - "compute_non_mgmt_gway": "" }, "roles" : ["config","control"], "cluster_id": "5-node", - "vns_id": "5-node", - "mask": "255.255.255.240", - "gway": "10.204.221.62", - "passwd": "c0ntrail123", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.62", + "password": "c0ntrail123", "domain": "englab.juniper.net", "control": { "eth1": { - "ip": "192.168.100.5/24", - "gw": "" + "ip_address": "192.168.100.5/24", + "gateway": "" } }, - "power_address": "10.207.25.154" + "ipmi_address": "10.207.25.154" } ] diff --git a/src/client/server-manager b/src/client/server-manager index 06053d3c..6f2d235d 100755 --- 
a/src/client/server-manager +++ b/src/client/server-manager @@ -11,8 +11,6 @@ import argparse import pdb import sys -import smgr_create -import smgr_modify import smgr_delete import smgr_show import smgr_upload_image @@ -31,7 +29,7 @@ commands_dict = { 'provision' : smgr_provision_server.provision_server, 'restart' : smgr_restart_server.restart_server, 'upload_image' : smgr_upload_image.upload_image, - 'status' : smgr_status.show_status + 'status' : smgr_status.show_server_status } def server_manager(args_str=None): diff --git a/src/client/server.json b/src/client/server.json index 1ebe0175..add5442f 100644 --- a/src/client/server.json +++ b/src/client/server.json @@ -1,22 +1,27 @@ { "server": [ { - "server_id": "demo2-server", - "mac": "00:25:90:aa:36:32", - "ip": "10.84.51.13", - "server_params" : { - "ifname": "eth1", - "compute_non_mgmt_ip": "", - "compute_non_mgmt_gway": "" + "id": "demo2-server", + "mac_address": "00:25:90:aa:36:32", + "ip_address": "10.84.51.13", + "parameters" : { + "interface_name": "eth1", + "partition": "" }, "roles" : ["config","openstack","control","compute","collector","webui","database"], "cluster_id": "demo-cluster", - "vns_id": "demo-vns", - "mask": "255.255.255.0", - "gway": "10.84.51.254", - "passwd": "juniper", + "subnet_mask": "255.255.255.0", + "gateway": "10.84.51.254", + "password": "juniper", "domain": "demo.juniper.net", - "power_address": "10.84.6.63" + "ipmi_address": "10.84.6.63", + "tag" : { + "datacenter" : "demo-dc", + "floor" : "demo-floor", + "hall" : "demo-hall", + "rack" : "demo-rack", + "user_tag" : "demo-user" + } } ] } diff --git a/src/client/server_kernel_upgrade.json b/src/client/server_kernel_upgrade.json new file mode 100644 index 00000000..fb8da42e --- /dev/null +++ b/src/client/server_kernel_upgrade.json @@ -0,0 +1,29 @@ +{ + "server": [ + { + "id": "demo2-server", + "mac_address": "00:25:90:aa:36:32", + "ip_address": "10.84.51.13", + "parameters" : { + "interface_name": "eth1", + "kernel_version": 
"3.13.0-34", + "kernel_upgrade": "no", + "partition": "" + }, + "roles" : ["config","openstack","control","compute","collector","webui","database"], + "cluster_id": "demo-cluster", + "subnet_mask": "255.255.255.0", + "gateway": "10.84.51.254", + "password": "juniper", + "domain": "demo.juniper.net", + "power_address": "10.84.6.63", + "tag" : { + "datacenter" : "demo-dc", + "floor" : "demo-floor", + "hall" : "demo-hall", + "rack" : "demo-rack", + "user_tag" : "demo-user" + } + } + ] +} diff --git a/src/client/smgr_client_config.ini b/src/client/sm-client-config.ini similarity index 65% rename from src/client/smgr_client_config.ini rename to src/client/sm-client-config.ini index 714ef2e1..e54d0d8e 100644 --- a/src/client/smgr_client_config.ini +++ b/src/client/sm-client-config.ini @@ -4,21 +4,20 @@ listen_ip_addr = 1.1.1.1 ; server manager listening port listen_port = 9001 -[VNS] -mask = 255.255.255.0 +[CLUSTER] +subnet_mask = 255.255.255.0 domain = contrail.juniper.net database_dir = /home/cassandra -encap_priority = MPLSoUDP,MPLSoGRE,VXLAN +encapsulation_priority = MPLSoUDP,MPLSoGRE,VXLAN router_asn = 64512 -ks_user = admin -ks_passwd = contrail123 -passwd = c0ntrail123 +keystone_username = admin +keystone_password = contrail123 +password = c0ntrail123 analytics_data_ttl = 168 haproxy = disable -use_certs = False +use_certificates = False multi_tenancy = False -db_initial_token = +database_token = service_token = contrail123 -openstack_passwd = contrail123 analytics_data_ttl = 168 [SERVER] diff --git a/src/client/smgr_add.py b/src/client/smgr_add.py index 1de3d0d3..fbfa581a 100755 --- a/src/client/smgr_add.py +++ b/src/client/smgr_add.py @@ -6,7 +6,7 @@ Author : Abhay Joshi Description : This program is a simple cli interface to add server manager configuration objects. - Objects can be vns, cluster, server, or image. + Objects can be cluster, server, or image. 
""" import argparse import pdb @@ -27,68 +27,74 @@ # object parameter values manually instead of providing a # json file. object_dict = { - "vns" : OrderedDict ([ - ("vns_id", "Specify unique vns_id for this vns cluster"), + "cluster" : OrderedDict ([ + ("id", "Specify unique id for this cluster"), ("email", "Email id for notifications"), - ("vns_params", OrderedDict ([ + ("parameters", OrderedDict ([ ("router_asn", "Router asn value"), - ("mask", "Subnet mask"), - ("gway", "Default gateway for servers in this cluster"), - ("passwd", "Default password for servers in this cluster"), + ("subnet_mask", "Subnet mask"), + ("gateway", "Default gateway for servers in this cluster"), + ("password", "Default password for servers in this cluster"), ("domain", "Default domain for servers in this cluster"), ("database_dir", "home directory for cassandra"), - ("db_initial_token", "initial database token"), - ("openstack_mgmt_ip", "openstack management ip"), - ("use_certs", "whether to use certificates for auth (True/False)"), + ("database_token", "initial database token"), + ("use_certificates", "whether to use certificates for auth (True/False)"), ("multi_tenancy", "Openstack multitenancy (True/False)"), ("service_token", "Service token for openstack access"), - ("ks_user", "Keystone user name"), - ("ks_passwd", "keystone password"), - ("ks_tenant", "keystone tenant name"), - ("openstack_passwd", "open stack password"), + ("keystone_username", "Keystone user name"), + ("keystone_password", "keystone password"), + ("keystone_tenant", "keystone tenant name"), ("analytics_data_ttl", "analytics data TTL"), ("osd_bootstrap_key", "OSD Bootstrap Key"), ("admin_key", "Admin Authentication Key"), ("storage_mon_secret", "Storage Monitor Secret Key")])) ]), "server": OrderedDict ([ - ("server_id", "server id value"), - ("ip", "server ip address"), - ("mac", "server mac address"), + ("id", "server id value"), + ("host_name", "host name of the server"), + ("ip_address", "server ip 
address"), + ("mac_address", "server mac address"), ("roles", "comma-separated list of roles for this server"), - ("server_params", OrderedDict([ - ("ifname", "Ethernet Interface name"), - ("compute_non_mgmt_ip", "compute node non mgmt ip (default none)"), - ("compute_non_mgmt_gway", "compute node non mgmt gway (default none)"), + ("parameters", OrderedDict([ + ("interface_name", "Ethernet Interface name"), + ("partition", "Use this partition and create lvm"), ("disks", "Storage OSDs (default none)")])), - ("vns_id", "vns id the server belongs to"), - ("cluster_id", "Physical cluster id the server belongs to"), - ("pod_id", "pod id the server belongs to"), - ("rack_id", "rack id the server belongs to"), - ("cloud_id", "cloud id the server belongs to"), - ("mask", "subnet mask (default use value from vns table)"), - ("gway", "gateway (default use value from vns table)"), - ("domain", "domain name (default use value from vns table)"), - ("passwd", "root password (default use value from vns table)"), - ("power_pass", "IPMI password"), - ("power_user", "IPMI user"), - ("power_address", "IPMI Address"), - ("email", "email id for notifications (default use value from vns table)"), + ("cluster_id", "cluster id the server belongs to"), + ("tag1", "tag value for this tag"), + ("tag2", "tag value for this tag"), + ("tag3", "tag value for this tag"), + ("tag4", "tag value for this tag"), + ("tag5", "tag value for this tag"), + ("tag6", "tag value for this tag"), + ("tag7", "tag value for this tag"), + ("subnet_mask", "subnet mask (default use value from cluster table)"), + ("gateway", "gateway (default use value from cluster table)"), + ("domain", "domain name (default use value from cluster table)"), + ("password", "root password (default use value from cluster table)"), + ("ipmi_password", "IPMI password"), + ("ipmi_username", "IPMI username"), + ("ipmi_address", "IPMI address"), + ("email", "email id for notifications (default use value from server's cluster)"), ]), 
"image" : OrderedDict ([ - ("image_id", "Specify unique image id for this image"), - ("image_version", "Specify version for this image"), - ("image_type", + ("id", "Specify unique image id for this image"), + ("version", "Specify version for this image"), + ("type", "ubuntu/centos/contrail-ubuntu-package/contrail-centos-package/contrail-storage-ubuntu-package"), - ("image_path", "complete path where image file is located on server") + ("path", "complete path where image file is located on server") ]), - "cluster" : OrderedDict ([ - ("cluster_id", "Specify unique cluster_id for this cluster"), + "tag" : OrderedDict ([ + ("tag1", "Specify tag name for tag1"), + ("tag2", "Specify tag name for tag2"), + ("tag3", "Specify tag name for tag3"), + ("tag4", "Specify tag name for tag4"), + ("tag5", "Specify tag name for tag5"), + ("tag6", "Specify tag name for tag6"), + ("tag7", "Specify tag name for tag7"), ]), - "server_keys": "['server_id','mac']", - "vns_keys": "['vns_id']", - "cluster_keys": "['cluster_id']", - "image_keys": "['image_id']" + "server_keys": "['id','mac_address']", + "cluster_keys": "['id']", + "image_keys": "['id']" } def parse_arguments(args_str=None): @@ -104,12 +110,7 @@ def parse_arguments(args_str=None): prog="server-manager add" ) # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-c", help=("Server manager client config file " " (default - %s)" %( smgr_client_def._DEF_SMGR_CFG_FILE))) @@ -125,12 +126,12 @@ def parse_arguments(args_str=None): "--file_name", "-f", help="json file containing server param values") - # Subparser for vns add - parser_vns = subparsers.add_parser( - "vns", help='Create vns') - parser_vns.add_argument( + # Subparser for server tags add + parser_tag = subparsers.add_parser( + "tag", help='Create tags') + 
parser_tag.add_argument( "--file_name", "-f", - help="json file containing vns param values") + help="json file containing tag values") # Subparser for cluster add parser_cluster = subparsers.add_parser( @@ -220,16 +221,29 @@ def get_object_config_ini_entries(object, config): # end get_object_config_ini_entries def get_default_object(object, config): + # get current tag settings + payload = {} + resp = send_REST_request( + smgr_ip, smgr_port, + "tag", payload, None, + None, True, "GET" ) + json_str = resp.replace("null", "''") + tag_dict = eval(json_str) + rev_tag_dict = dict((v,k) for k,v in tag_dict.iteritems()) + default_object = {} config_object_defaults = get_object_config_ini_entries(object, config) if not config_object_defaults: return default_object - default_object[object+"_params"] = {} + default_object["parameters"] = {} + default_object["tag"] = {} for key, value in config_object_defaults: if key in object_dict[object]: default_object[key] = value - elif key in object_dict[object][object+"_params"]: - default_object[object+"_params"][key] = value + elif key in object_dict[object]["parameters"]: + default_object["parameters"][key] = value + elif key in rev_tag_dict: + default_object["tag"][key] = value return default_object # end get_default_object @@ -239,32 +253,93 @@ def merge_with_defaults(object, payload, config): default_object = get_default_object(object, config) for i in range(len(payload[object])): obj = payload[object][i] - obj_id = object+"_id" + obj_id = "id" if obj_id not in obj or not obj[obj_id]: continue - if object_exists(object, object+"_id", str(obj[obj_id]), {}): + if object_exists(object, "id", str(obj[obj_id]), {}): continue param_object = {} - if object+"_params" in obj and object+"_params" in default_object: - param_object = dict(default_object[object+"_params"].items() + obj[object+"_params"].items()) - elif object+"_params" in default_object: - param_object = default_object[object+"_params"] + if "parameters" in obj and 
"parameters" in default_object: + param_object = dict(default_object["parameters"].items() + obj["parameters"].items()) + elif "parameters" in default_object: + param_object = default_object["parameters"] + tag_object = {} + if "tag" in obj and "tag" in default_object: + tag_object = dict(default_object["tag"].items() + obj["tag"].items()) + elif "tag" in default_object: + tag_object = default_object["tag"] payload[object][i] = dict(default_object.items() + obj.items()) if param_object: - payload[object][i][object+"_params"] = param_object + payload[object][i]["parameters"] = param_object + if tag_object: + payload[object][i]["tag"] = tag_object -# end create_vns_default_dict +# end merge_with_defaults +# Function to accept parameters from user and then build payload to be +# sent with REST API request for creating the object of type tag.A +# This function is kept separate as processing is quite different from +# other objects. +def add_tag_payload(object): + payload = {} + fields_dict = object_dict[object] + #post a request for each object + resp = send_REST_request( + smgr_ip, smgr_port, object, payload, + None, None, False, "GET") + json_str = resp.replace("null", "''") + payload = eval(json_str) + while True: + i = 0 + for key in fields_dict.iterkeys(): + value = payload.get( + key, '') + data = str(i) + ". 
%s : %s" %( + key, value) + print data + i += 1 + # end for + user_input = raw_input( + "Enter index=, " + "empty value to delete tag, Q to end :") + if user_input.upper() == "Q": + break; + try: + user_data = [x.strip() for x in user_input.split('=')] + index = int(user_data[0]) + if index >= len(fields_dict): + print "Invalid Index" + continue + value = user_data[1] + if value: + payload[fields_dict.keys()[index]] = value + else: + payload.pop(fields_dict.keys()[index], None) + except: + print "Invalid input =" + continue + # end while + return payload +# End add_tag_payload # Function to accept parameters from user and then build payload to be # sent with REST API request for creating the object. def add_payload(object, default_object): payload = {} objects = [] + # get current tag settings + resp = send_REST_request( + smgr_ip, smgr_port, + "tag", payload, None, + None, True, "GET" ) + json_str = resp.replace("null", "''") + tag_dict = eval(json_str) + rev_tag_dict = dict((v,k) for k,v in tag_dict.iteritems()) + while True: temp_dict = {} fields_dict = object_dict[object] - obj_id = object+"_id" + obj_id = "id" msg = obj_id + ":" user_input = raw_input(msg) @@ -295,32 +370,44 @@ def add_payload(object, default_object): data = '' i = 0 index_dict = {} + server_tags = obj.get("tag", {}) #form the fields to be displayed with index for key in fields_dict: value = fields_dict[key] - if (key != (object+"_params")): + if (key in ["tag1", "tag2", "tag3", + "tag4", "tag5", "tag6", + "tag7"]): + tag = tag_dict.get(key, None) + if not tag: + continue + data += str(i)+ ". %s : %s \n" %( + tag, server_tags.get(tag, '')) + index_dict[i] = tag + i+=1 + elif (key != ("parameters")): index_dict[i] = key if key in non_mutable_fields : data += str(i)+ ". %s : %s *\n" % (key, obj[key]) elif key == "roles": - data += str(i)+ ". %s : %s \n" % (key, - ','.join(eval(obj[key]))) + data += str(i)+ ". %s : %s \n" % ( + key, ','.join(obj[key])) else: data += str(i)+ ". 
%s : %s \n" % (key, obj[key]) i+=1 else: - if obj.has_key(object+"_params") and obj[object+"_params"]: - smgr_params = eval(obj[object+"_params"]) + if ("parameters" in obj) and obj["parameters"]: + smgr_params = obj["parameters"].copy() else: smgr_params = {} for param in value: - data += str(i)+ ". %s : %s \n" % (param, - smgr_params.get(param, "")) + data += str(i)+ ". %s : %s \n" % ( + param, smgr_params.get(param, "")) index_dict[i] = param i+=1 #display them print data params_dict = {} + tags = {} #Prompt if users wants to modify a field in # the existing object or continue # adding a new object @@ -329,7 +416,8 @@ def add_payload(object, default_object): " continue with next Object :") if user_selection.strip() == 'C': #print 'send output' - temp_dict[object+"_params"] = params_dict + temp_dict["parameters"] = params_dict + temp_dict["tag"] = tags break else: @@ -343,13 +431,18 @@ def add_payload(object, default_object): continue key_selected = index_dict[eval(user_selection)] - object_params = object_dict[object] [object+"_params"] - if key_selected in object_params.keys(): + object_params = object_dict[object] ["parameters"] + if key_selected in rev_tag_dict: + msg = key_selected + ":" + user_input = rlinput(msg, server_tags.get(key_selected, '')) + tags[key_selected] = user_input + elif key_selected in object_params.keys(): msg = key_selected + ":" value = smgr_params.get(key_selected,"") if key_selected != 'disks': - user_input = rlinput(msg, default_value) - elif key_selected == 'disks' and 'storage' in object_dict["roles"]: + user_input = rlinput( + msg, smgr_params.get(key_selected, '')) + elif key_selected == 'disks' and 'storage-compute' in object_dict["roles"]: disks = raw_input(msg) if disks: disk_list = disks.split(',') @@ -358,9 +451,9 @@ def add_payload(object, default_object): user_input = None params_dict[key_selected] = user_input elif key_selected == "roles": - msg = index_dict[eval(user_selection)] + ":" + msg = key_selected + ":" 
user_input = rlinput(msg, - ','.join(eval(obj[index_dict[eval(user_selection)]]))) + ','.join(obj[key_selected])) temp_dict[key_selected] = user_input.replace(' ','').split(",") else: msg = index_dict[eval(user_selection)] + ":" @@ -368,19 +461,33 @@ def add_payload(object, default_object): temp_dict[key_selected] = user_input #Add a new object else: - obj_id = object+"_id" + obj_id = "id" + tag = {} for key in fields_dict: if key == obj_id: continue value = fields_dict[key] - #non server params - if (key != (object+"_params")): + if (key in ["tag1", "tag2", "tag3", + "tag4", "tag5", "tag6", + "tag7"]): + msg = tag_dict.get(key, None) + if not msg: + continue + if value: + msg += " (%s) " %(value) + msg += ": " + default_tag = default_object.get("tag", {}) + default_value = default_tag.get(tag_dict[key], "") + user_input = rlinput(msg, default_value) + if user_input: + tag[tag_dict[key]] = user_input + temp_dict['tag'] = tag + elif (key != ("parameters")): msg = key if value: msg += " (%s) " %(value) msg += ": " default_value = default_object.get(key, "") - #user_input = raw_input(msg) user_input = rlinput(msg, default_value) if user_input: # Special case for roles - @@ -401,12 +508,13 @@ def add_payload(object, default_object): msg += " (%s) " %(pvalue) msg += ": " #user_input = raw_input(msg) - if default_object.has_key(object+"_params"): - default_value = default_object[object+"_params"].get(param, "") + if default_object.has_key("parameters"): + default_value = default_object["parameters"].get(param, "") else: default_value = "" user_input = "" - if param == 'disks' and 'storage' in temp_dict["roles"]: + if ((param == 'disks') and ('roles' in temp_dict) and + ('storage-compute' in temp_dict["roles"])): disks = raw_input(msg) if disks: disk_list = disks.split(',') @@ -414,11 +522,11 @@ def add_payload(object, default_object): else: user_input = None else: - user_input = raw_input(msg) + user_input = rlinput(msg, default_value) if user_input: param_dict[param] = 
user_input temp_dict[key] = param_dict - # End if (key != (object+"_params")) + # End if (key != ("parameters")) # End for key, value in fields_dict objects.append(temp_dict) choice = raw_input("More %s(s) to input? (y/N)" %(object)) @@ -429,6 +537,7 @@ def add_payload(object, default_object): payload[object] = objects return payload # End add_payload + smgr_ip = None smgr_port = None @@ -437,29 +546,23 @@ def add_config(args_str=None): global smgr_ip global smgr_port - if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT + if args.config_file: + config_file = args.config_file else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port + config_file = smgr_client_def._DEF_SMGR_CFG_FILE + # end args.config_file + try: + config = ConfigParser.SafeConfigParser() + config.read([config_file]) + smgr_config = dict(config.items("SERVER-MANAGER")) + smgr_ip = smgr_config.get("listen_ip_addr", None) + if not smgr_ip: + sys.exit(("listen_ip_addr missing in config file" + "%s" %config_file)) + smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except object = args.object try: if args.file_name: @@ -467,8 +570,11 @@ def add_config(args_str=None): merge_with_defaults(object, payload, config) else: # Accept parameters and construct json. 
- default_object = get_default_object(object, config) - payload = add_payload(object, default_object) + if object == 'tag': + payload = add_tag_payload(object) + else: + default_object = get_default_object(object, config) + payload = add_payload(object, default_object) except ValueError as e: print "Error in JSON Format : %s" % e sys.exit(1) diff --git a/src/client/smgr_client_def.py b/src/client/smgr_client_def.py index 86285a2b..34dfd10d 100644 --- a/src/client/smgr_client_def.py +++ b/src/client/smgr_client_def.py @@ -6,7 +6,7 @@ import json _DEF_SMGR_PORT = 9001 -_DEF_SMGR_CFG_FILE = os.path.dirname(__file__) + "/smgr_client_config.ini" +_DEF_SMGR_CFG_FILE = os.path.dirname(__file__) + "/sm-client-config.ini" def print_rest_response(resp): if resp: diff --git a/src/client/smgr_contrail_status.py b/src/client/smgr_contrail_status.py new file mode 100755 index 00000000..9b7da548 --- /dev/null +++ b/src/client/smgr_contrail_status.py @@ -0,0 +1,202 @@ +#!/usr/bin/python + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +""" + Name : smgr_status.py + Author : Prasad Miriyala & Bharat Putta + Description : This program is a simple cli interface to + get status of a server or all the servers in a Cluster. 
+""" +import argparse +import cgitb +import sys +import pycurl +from StringIO import StringIO +import ConfigParser +import json +import smgr_client_def + +def parse_arguments(): + # Process the arguments + if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='''Show a Server Manager object''' + ) + else: + parser = argparse.ArgumentParser( + description='''Show a Server Manager object''', + prog="server-manager status" + ) + # end else + parser.add_argument("--config_file", "-c", + help=("Server manager client config file " + " (default - %s)" %( + smgr_client_def._DEF_SMGR_CFG_FILE))) + parser.add_argument("--detail", "-d", action='store_true', + help="Flag to indicate if details are requested") + subparsers = parser.add_subparsers(title='objects', + description='valid objects', + help='help for object') + + # Subparser for server status + parser_server = subparsers.add_parser( + "server",help='Status server') + group = parser_server.add_mutually_exclusive_group() + group.add_argument("--server_id", + help=("server id for server")) + parser_server.set_defaults(get_rest_params=server_rest_params) + parser_server.set_defaults(get_status=get_server_status) + + + # Subparser for cluster show + parser_cluster = subparsers.add_parser( + "cluster", help='Status cluster') + parser_cluster.add_argument("--cluster_id", + help=("id for cluster")) + parser_cluster.set_defaults(get_rest_params=cluster_rest_params) + parser_cluster.set_defaults(get_status=get_cluster_status) + + return parser +# end def parse_arguments + +def send_REST_request(ip, port, object, match_key, + match_value, detail): + try: + response = StringIO() + headers = ["Content-Type:application/json"] + url = "http://%s:%s/%s" % (ip, port, object) + args_str = '' + if match_key: + args_str += match_key + "=" + match_value + if detail: + args_str += "&detail" + if args_str != '': + url += "?" 
+ args_str + conn = pycurl.Curl() + conn.setopt(pycurl.TIMEOUT, 1) + conn.setopt(pycurl.URL, url) + conn.setopt(pycurl.HTTPHEADER, headers) + conn.setopt(pycurl.HTTPGET, 1) + conn.setopt(pycurl.WRITEFUNCTION, response.write) + conn.perform() + return response.getvalue() + except: + return None +# end def send_REST_request + +def server_rest_params(args): + rest_api_params = {} + rest_api_params['object'] = 'status' + if args.server_id: + rest_api_params['match_key'] = 'id' + rest_api_params['match_value'] = args.server_id + else: + rest_api_params['match_key'] = None + rest_api_params['match_value'] = None + return rest_api_params +#end def server_status_rest_params + +def cluster_rest_params(args): + rest_api_params = {} + rest_api_params['object'] = 'server' + if args.cluster_id: + rest_api_params['match_key'] = 'cluster_id' + rest_api_params['match_value'] = args.cluster_id + else: + rest_api_params['match_key'] = None + rest_api_params['match_value'] = None + return rest_api_params +#end def cluster_status_rest_params + +def get_obj(resp): + try: + data = json.loads(resp) + return data + except ValueError: + return {} +#end def get_obj + +def get_server_status(args, smgr_ip, smgr_port): + rest_api_params = args.get_rest_params(args) + resp = send_REST_request(smgr_ip, smgr_port, + rest_api_params['object'], + rest_api_params['match_key'], + rest_api_params['match_value'], + args.detail) + if resp is not None: + status = get_obj(resp) + if 'server_status' not in status: + return + server_status = status['server_status'] + modified_status = server_status.replace('active', 'active\n') \ + .replace('failed', 'failed\n') \ + .replace('STARTIN', 'STARTIN\n') \ + .replace('BACKOFF', 'BACKOFF\n') \ + .replace( ' ==', ' ==\n') \ + .replace('NOT PRESENT', 'NOT PRESENT\n')\ + .replace('EXITED', 'EXITED\n') + print modified_status +#end def get_server_status + +def get_cluster_status(args, smgr_ip, smgr_port): + rest_api_params = args.get_rest_params(args) + resp = 
send_REST_request(smgr_ip, smgr_port, + rest_api_params['object'], + rest_api_params['match_key'], + rest_api_params['match_value'], + args.detail) + servers = json.loads(resp)['server'] + for server in servers: + server_id = server['id'] + server_resp = send_REST_request(smgr_ip, smgr_port, + 'status', + 'id', + server_id.encode('ascii','ignore'), + args.detail) + if server_resp is None: + continue + status = get_obj(server_resp) + if 'server_status' not in status: + continue + server_status = status['server_status'] + modified_status = server_status.replace('active', 'active\n') \ + .replace('failed', 'failed\n') \ + .replace('STARTIN', 'STARTIN\n') \ + .replace('BACKOFF', 'BACKOFF\n') \ + .replace( ' ==', ' ==\n') \ + .replace('NOT PRESENT', 'NOT PRESENT\n') \ + .replace('EXITED', 'EXITED\n') + print ("Server %s status:") % (server_id) + print modified_status + print "\n" + +#end def get_cluster_status + +def show_status(args_str=None): + parser = parse_arguments() + args = parser.parse_args(args_str) + if args.config_file: + config_file = args.config_file + else: + config_file = smgr_client_def._DEF_SMGR_CFG_FILE + # end args.config_file + try: + config = ConfigParser.SafeConfigParser() + config.read([config_file]) + smgr_config = dict(config.items("SERVER-MANAGER")) + smgr_ip = smgr_config.get("listen_ip_addr", None) + if not smgr_ip: + sys.exit(("listen_ip_addr missing in config file" + "%s" %config_file)) + smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except + args.get_status(args, smgr_ip, smgr_port) +# End of show_status + + +if __name__ == "__main__": + cgitb.enable(format='text') + show_status(sys.argv[1:]) +# End if __name__ diff --git a/src/client/smgr_create.py b/src/client/smgr_create.py deleted file mode 100755 index a3876413..00000000 --- a/src/client/smgr_create.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/python - -# vim: tabstop=4 
shiftwidth=4 softtabstop=4 -""" - Name : smgr_create.py - Author : Abhay Joshi - Description : This program is a simple cli interface to - create server manager configuration objects. - Objects can be vns, cluster, server, or image. -""" -import argparse -import pdb -import sys -import pycurl -from StringIO import StringIO -import json -try: - from collections import OrderedDict -except ImportError: - from ordereddict import OrderedDict -import ConfigParser -import smgr_client_def - -# Below array of dictionary's is used by create_payload -# function to create payload when user choses to input -# object parameter values manually instead of providing a -# json file. -object_dict = { - "vns" : OrderedDict ([ - ("vns_id", "Specify unique vns_id for this vns cluster"), - ("email", "Email id for notifications"), - ("vns_params", OrderedDict ([ - ("router_asn", "Router asn value"), - ("mask", "Subnet mask"), - ("gway", "Default gateway for servers in this cluster"), - ("passwd", "Default password for servers in this cluster"), - ("domain", "Default domain for servers in this cluster"), - ("database_dir", "home directory for cassandra"), - ("db_initial_token", "initial database token"), - ("openstack_mgmt_ip", "openstack management ip"), - ("use_certs", "whether to use certificates for auth (True/False)"), - ("multi_tenancy", "Openstack multitenancy (True/False)"), - ("service_token", "Service token for openstack access"), - ("ks_user", "Keystone user name"), - ("ks_passwd", "keystone password"), - ("ks_tenant", "keystone tenant name"), - ("openstack_passwd", "open stack password"), - ("analytics_data_ttl", "analytics data TTL"), - ("osd_bootstrap_key", "OSD Bootstrap Key"), - ("admin_key", "Admin Authentication Key"), - ("storage_mon_secret", "Storage Monitor Secret Key")])) - ]), - "server": OrderedDict ([ - ("server_id", "server id value"), - ("ip", "server ip address"), - ("mac", "server mac address"), - ("roles", "comma-separated list of roles for this server"), - 
("server_params", OrderedDict([ - ("ifname", "Ethernet Interface name"), - ("compute_non_mgmt_ip", "compute node non mgmt ip (default none)"), - ("compute_non_mgmt_gway", "compute node non mgmt gway (default none)"), - ("disks", "Storage OSDs")])), - ("vns_id", "vns id the server belongs to"), - ("cluster_id", "Physical cluster id the server belongs to"), - ("pod_id", "pod id the server belongs to"), - ("rack_id", "rack id the server belongs to"), - ("cloud_id", "cloud id the server belongs to"), - ("mask", "subnet mask (default use value from vns table)"), - ("gway", "gateway (default use value from vns table)"), - ("domain", "domain name (default use value from vns table)"), - ("passwd", "root password (default use value from vns table)"), - ("email", "email id for notifications (default use value from vns table)"), - ]), - "image" : OrderedDict ([ - ("image_id", "Specify unique image id for this image"), - ("image_version", "Specify version for this image"), - ("image_type", "ubuntu/centos/contrail-ubuntu-repo"), - ("image_path", "complete path where image file is located on server") - ]), - "cluster" : OrderedDict ([ - ("cluster_id", "Specify unique cluster_id for this cluster"), - ]) -} - -def parse_arguments(args_str=None): - - # Process the arguments - if __name__ == "__main__": - parser = argparse.ArgumentParser( - description='''Create a Server Manager object''' - ) - else: - parser = argparse.ArgumentParser( - description='''Create a Server Manager object''', - prog="server-manager create" - ) - # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", - help=("Server manager client config file " - " (default - %s)" %( - smgr_client_def._DEF_SMGR_CFG_FILE))) - subparsers = parser.add_subparsers(title='objects', - description='valid objects', - help='help for objects', - dest='object') 
- - # Subparser for server create - parser_server = subparsers.add_parser( - "server",help='Create server') - parser_server.add_argument( - "--file_name", "-f", - help="json file containing server param values") - - # Subparser for vns create - parser_vns = subparsers.add_parser( - "vns", help='Create vns') - parser_vns.add_argument( - "--file_name", "-f", - help="json file containing vns param values") - - # Subparser for cluster create - parser_cluster = subparsers.add_parser( - "cluster", help='Create cluster') - parser_cluster.add_argument( - "--file_name", "-f", - help="json file containing cluster param values") - - # Subparser for image create - parser_image = subparsers.add_parser( - "image", help='Create image') - parser_image.add_argument( - "--file_name", "-f", - help="json file containing image param values") - - args = parser.parse_args(args_str) - return args -# end def parse_arguments - -def send_REST_request(ip, port, object, payload): - try: - response = StringIO() - headers = ["Content-Type:application/json"] - url = "http://%s:%s/%s" %( - ip, port, object) - conn = pycurl.Curl() - conn.setopt(pycurl.URL, url) - conn.setopt(pycurl.HTTPHEADER, headers) - conn.setopt(pycurl.POST, 1) - conn.setopt(pycurl.POSTFIELDS, '%s'%json.dumps(payload)) - conn.setopt(pycurl.CUSTOMREQUEST, "PUT") - conn.setopt(pycurl.WRITEFUNCTION, response.write) - conn.perform() - return response.getvalue() - except: - return None - -# Function to accept parameters from user and then build payload to be -# sent with REST API request for creating the object. 
-def create_payload(object): - payload = {} - objects = [] - while True: - temp_dict = {} - fields_dict = object_dict[object] - for key in fields_dict: - value = fields_dict[key] - if (key != (object+"_params")): - msg = key - if value: - msg += " (%s) " %(value) - msg += ": " - user_input = raw_input(msg) - if user_input: - # Special case for roles - - # store as a list - if key == "roles": - temp_dict[key] = user_input.strip().split(",") - else: - temp_dict[key] = user_input - else: - param_dict = {} - for param in value: - pvalue = value[param] - msg = param - if pvalue: - msg += " (%s) " %(pvalue) - msg += ": " - user_input = "" - if param == 'disks' and 'storage' in temp_dict["roles"]: - disks = raw_input(msg) - if disks: - disk_list = disks.split(',') - user_input = [str(d) for d in disk_list] - else: - user_input = raw_input(msg) - if user_input: - param_dict[param] = user_input - temp_dict[key] = param_dict - # End if (key != (object+"_params")) - # End for key, value in fields_dict - objects.append(temp_dict) - choice = raw_input("More %s(s) to input? 
(y/N)" %(object)) - if ((not choice) or - (choice.lower() != "y")): - break - # End while True - payload[object] = objects - return payload -# End create_payload - -def create_config(args_str=None): - args = parse_arguments(args_str) - if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT - else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port - object = args.object - if args.file_name: - payload = json.load(open(args.file_name)) - else: - # Accept parameters and construct json. - payload = create_payload(object) - - resp = send_REST_request(smgr_ip, smgr_port, - object, payload) - print resp -# End of create_config - -if __name__ == "__main__": - import cgitb - cgitb.enable(format='text') - - create_config(sys.argv[1:]) -# End if __name__ diff --git a/src/client/smgr_create_db.py b/src/client/smgr_create_db.py index e8e93d68..c4e4af75 100755 --- a/src/client/smgr_create_db.py +++ b/src/client/smgr_create_db.py @@ -5,12 +5,13 @@ Author : rishiv@juniper.net Description : This program is a simple cli interface to create server manager database with objects. - Objects can be vns, cluster, server, or image. - Takes -t testbed.py or/and --vns_id as command line input + Objects can be cluster, server or image. 
+ Mandatory Parameter : testbed.py + Optional Parameter : cluster_id + Optional Parameter : server Manager specific config file """ -import pdb import subprocess import json import string @@ -31,7 +32,7 @@ def svrmgr_add_all(): verify_user_input() create_json() - add_vns() + add_cluster() add_image() add_pkg() add_server() @@ -39,7 +40,7 @@ def svrmgr_add_all(): def create_json(): modify_server_json() - modify_vns_json() + modify_cluster_json() def modify_server_json(): @@ -58,6 +59,8 @@ def modify_server_json(): server_dict = json.loads(in_data) update_roles_from_testbed_py(server_dict) + update_bond_from_testbed_py(server_dict) + update_multi_if_from_testbed_py(server_dict) out_file = open(server_file, 'w') out_data = json.dumps(server_dict, indent=4) @@ -78,7 +81,7 @@ def update_roles_from_testbed_py(server_dict): continue for host_string in testbed.env.roledefs[key]: ip = getIp(host_string) - if node['ip'] == ip: + if node['ip_address'] == ip: if key == 'cfgm': roles.append("config") else: @@ -89,11 +92,61 @@ def update_roles_from_testbed_py(server_dict): node['roles'] = roles for node in server_dict['server']: - node['vns_id'] = get_pref_vns_id() + node['cluster_id'] = get_pref_cluster_id() return server_dict # end update_roles_from_testbed_py +def update_bond_from_testbed_py(server_dict): + testbed = get_testbed() + if 'control_data' in dir(testbed): + for node in server_dict['server']: + for key in testbed.bond: + ip = getIp(key) + if node['ip_address'] == ip: + node['parameters']['setup_interface'] = "Yes" + #node['parameters']['compute_non_mgmt_ip'] = "" + #node['parameters']['compute_non_mgmt_gw'] = "" + + name = testbed.bond[key]['name'] + mode = testbed.bond[key]['mode'] + member = testbed.bond[key]['member'] + option = {} + option['miimon'] = '100' + option['mode'] = mode + option['xmit_hash_policy'] = 'layer3+4' + + node['bond']={} + node['bond'][name]={} + node['bond'][name]['bond_options'] = "%s"%option + node['bond'][name]['member'] = "%s"%member + 
return server_dict +#End update_bond_from_testbed_py(server_dict): + + +def update_multi_if_from_testbed_py(server_dict): + testbed = get_testbed() + if 'control_data' in dir(testbed): + for node in server_dict['server']: + for key in testbed.control_data: + ip = getIp(key) + if node['ip_address'] == ip: + node['parameters']['setup_interface'] = "Yes" + #node['parameters']['compute_non_mgmt_ip'] = "" + #node['parameters']['compute_non_mgmt_gway'] = "" + + ip = testbed.control_data[key]['ip'] + gw = testbed.control_data[key]['gw'] + device = testbed.control_data[key]['device'] + + node['control']={} + node['control'][device] = {} + node['control'][device]['ip'] = ip + node['control'][device]['gw'] = gw + return server_dict +#End update_multi_if_from_testbed_py(server_dict): + + def getIp(string) : regEx = re.compile( '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' ) @@ -134,64 +187,64 @@ def get_pkg_id() : # end get_pkg_id() -def get_vns_id() : +def get_cluster_id() : params=read_ini_file(sys.argv[1:]) - vns_file = params['vns_file'] + cluster_file = params['cluster_file'] - vns_file = open( vns_file, 'r' ) - vns_data = vns_file.read() - vns_json = json.loads(vns_data) - vns_id = vns_json['vns'][0]['vns_id'] - vns_file.close() - return vns_id + cluster_file = open( cluster_file, 'r' ) + cluster_data = cluster_file.read() + cluster_json = json.loads(cluster_data) + cluster_id = cluster_json['cluster'][0]['id'] + cluster_file.close() + return cluster_id -# end get_vns_id() +# end get_cluster_id() -def add_vns(): - vns_file = None +def add_cluster(): + cluster_file = None params=read_ini_file(sys.argv[1:]) if params: try: - vns_file = params['vns_file'] + cluster_file = params['cluster_file'] except KeyError: pass - vns_id = get_pref_vns_id() - if not vns_file: - vns_dict = get_vns_with_vns_id_from_db() - if not len(vns_dict['vns']): - vns_dict = new_vns() + cluster_id = get_pref_cluster_id() + if not cluster_file: + cluster_dict = {} + cluster_dict = 
get_cluster_with_cluster_id_from_db() + if not len(cluster_dict['cluster']): + cluster_dict = new_cluster() else: - vns_dict = { - "vns" : [ + cluster_dict = { + "cluster" : [ { - "vns_id" : "", - "vns_params" : { + "id" : "", + "parameters" : { } } ] } - vns_dict['vns'][0]['vns_id'] = vns_id - modify_vns_from_testbed_py(vns_dict) + cluster_dict['cluster'][0]['id'] = cluster_id + modify_cluster_from_testbed_py(cluster_dict) temp_dir= expanduser("~") - vns_file = '%s/vns.json' %temp_dir - subprocess.call('touch %s' %vns_file, shell = True) - out_file = open(vns_file, 'w') - out_data = json.dumps(vns_dict, indent=4) + cluster_file = '%s/cluster.json' %temp_dir + subprocess.call('touch %s' %cluster_file, shell = True) + out_file = open(cluster_file, 'w') + out_data = json.dumps(cluster_dict, indent=4) out_file.write(out_data) out_file.close() else : timestamp = dt.now().strftime("%Y_%m_%d_%H_%M_%S") - subprocess.call( 'cp %s %s.org.%s' %(vns_file, vns_file, timestamp), shell=True ) - subprocess.call("sed -i 's/\"vns_id\".*,/\"vns_id\":\"%s\",/' %s" %(vns_id,vns_file), shell=True ) - subprocess.call("sed -i 's/\"vns_id\".*/\"vns_id\":\"%s\"/' %s" %(vns_id,vns_file), shell=True ) + subprocess.call( 'cp %s %s.org.%s' %(cluster_file, cluster_file, timestamp), shell=True ) + subprocess.call("sed -i 's/\"id\":.*,/\"id\":\"%s\",/' %s" %(cluster_id,cluster_file), shell=True ) - subprocess.call('server-manager add vns -f %s' %(vns_file), shell=True ) + subprocess.call('server-manager add cluster -f %s' %(cluster_file), shell=True ) -# end add_vns() +# end add_cluster() def add_server(): add_server_using_json() @@ -234,74 +287,82 @@ def add_server_using_json(): -def modify_vns_json(): +def modify_cluster_json(): params=read_ini_file(sys.argv[1:]) if not params: return None - if not params.has_key('vns_file'): + if not params.has_key('cluster_file'): return None - vns_file = params['vns_file'] + cluster_file = params['cluster_file'] timestamp = 
dt.now().strftime("%Y_%m_%d_%H_%M_%S") - subprocess.call( 'cp %s %s.org.%s' %(vns_file, vns_file, timestamp), shell=True ) + subprocess.call( 'cp %s %s.org.%s' %(cluster_file, cluster_file, timestamp), shell=True ) - in_file = open( vns_file, 'r' ) + in_file = open( cluster_file, 'r' ) in_data = in_file.read() - vns_dict = json.loads(in_data) + cluster_dict = json.loads(in_data) - modify_vns_from_testbed_py(vns_dict) + modify_cluster_from_testbed_py(cluster_dict) - out_file = open(vns_file, 'w') - out_data = json.dumps(vns_dict, indent=4) + out_file = open(cluster_file, 'w') + out_data = json.dumps(cluster_dict, indent=4) out_file.write(out_data) out_file.close() -def modify_vns_from_testbed_py(vns_dict): +def modify_cluster_from_testbed_py(cluster_dict): testbed = get_testbed() if testbed.env.has_key('mail_to'): - vns_dict['vns'][0]['email'] = testbed.env.mail_to + cluster_dict['cluster'][0]['email'] = testbed.env.mail_to if testbed.env.has_key('encap_priority'): - vns_dict['vns'][0]['vns_params']['encap_priority'] = testbed.env.encap_priority + cluster_dict['cluster'][0]['parameters']['encapsulation_priority'] = testbed.env.encap_priority + #if 'multi_tenancy' in dir(testbed): + # cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = testbed.multi_tenancy if 'multi_tenancy' in dir(testbed): - vns_dict['vns'][0]['vns_params']['multi_tenancy'] = testbed.multi_tenancy + if testbed.multi_tenancy == True : + cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = "True" + elif testbed.multi_tenancy == False : + cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = "False" + else: + cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = "False" + if 'os_username' in dir(testbed): - vns_dict['vns'][0]['vns_params']['ks_user'] = testbed.os_username + cluster_dict['cluster'][0]['parameters']['keystone_username'] = testbed.os_username if 'os_password' in dir(testbed): - vns_dict['vns'][0]['vns_params']['ks_passwd'] = testbed.os_password + 
cluster_dict['cluster'][0]['parameters']['keystone_password'] = testbed.os_password if 'os_tenant_name' in dir(testbed): - vns_dict['vns'][0]['vns_params']['ks_tenant'] = testbed.os_tenant_name + cluster_dict['cluster'][0]['parameters']['keystone_tenant'] = testbed.os_tenant_name if 'router_asn' in dir(testbed): - vns_dict['vns'][0]['vns_params']['router_asn'] = testbed.router_asn + cluster_dict['cluster'][0]['parameters']['router_asn'] = testbed.router_asn -def new_vns(): +def new_cluster(): params=read_ini_file(sys.argv[1:]) - vns_id = get_user_vns_id() - if not vns_id: - vns_id = params['vns_id'] - vns_dict = { - "vns" : [ + cluster_id = get_user_cluster_id() + if not cluster_id: + cluster_id = params['cluster_id'] + cluster_dict={ + "cluster" : [ { - "vns_id" : vns_id, - "vns_params" : { + "id" : cluster_id, + "parameters" : { "router_asn": "64512", "database_dir": "/home/cassandra", - "db_initial_token": "", + "database_token": "", "openstack_mgmt_ip": "", - "use_certs": "False", + "use_certificates": "False", "multi_tenancy": "False", - "encap_priority": "'MPLSoUDP','MPLSoGRE','VXLAN'", + "encapsulation_priority": "'MPLSoUDP','MPLSoGRE','VXLAN'", "service_token": "contrail123", - "ks_user": "admin", - "ks_passwd": "contrail123", - "ks_tenant": "admin", - "openstack_passwd": "contrail123", + "keystone_username": "admin", + "keystone_password": "contrail123", + "keystone_tenant": "admin", + "openstack_password": "contrail123", "analytics_data_ttl": "168", - "mask": "255.255.255.0", - "gway": "1.1.1.254", - "passwd": "c0ntrail123", + "subnet_mask": "255.255.255.0", + "gateway": "1.1.1.254", + "password": "c0ntrail123", "domain": "contrail.juniper.net", "haproxy": "disable" } @@ -310,26 +371,31 @@ def new_vns(): } config = ConfigParser.SafeConfigParser() config.read([smgr_client_def._DEF_SMGR_CFG_FILE]) - default_config_object = get_default_object("vns", config) - vns_params_dict = dict(vns_dict["vns"][0]["vns_params"].items() + 
default_config_object["vns_params"].items()) - tmp_vns_dict = dict(vns_dict["vns"][0].items() + default_config_object.items()) - tmp_vns_dict["vns_params"] = vns_params_dict - vns_dict["vns"][0] = tmp_vns_dict - return vns_dict + default_config_object = get_default_object("cluster", config) + cluster_params_dict = dict(cluster_dict["cluster"][0]["parameters"].items() + default_config_object["parameters"].items()) + tmp_cluster_dict = dict(cluster_dict["cluster"][0].items() + default_config_object.items()) + tmp_cluster_dict["parameters"] = cluster_params_dict + cluster_dict["cluster"][0] = tmp_cluster_dict + return cluster_dict -# End new_vns() +# End new_cluster() def parse_arguments(args_str=None): parser = argparse.ArgumentParser( - description='''Server Manager Tool to generate json from testbed.py''' + description='''Server Manager Tool to generate json from testbed.py . + Value specified in --cluster_id will override value in + server.json and vns.json . + ''', + usage= '''server-manager [-f ] [-c ] -t testbed.py ''' + ) #group1 = parser.add_mutually_exclusive_group() - parser.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-f", help="Server manager client config file ") - parser.add_argument("--vns_id", "-v", - help="user specified preferred vns_id ") + parser.add_argument("--cluster_id", "-c", + help="user specified preferred cluster_id ") parser.add_argument("--testbed_py", "-t", help="your testbed.py file") @@ -367,23 +433,23 @@ def get_testbed_py(args_str=None): # End read_ini_file -def get_user_vns_id(args_str=None): +def get_user_cluster_id(args_str=None): args = parse_arguments(args_str) - vns_id = None - if args.vns_id: - vns_id = args.vns_id - return vns_id + cluster_id = None + if args.cluster_id: + cluster_id = args.cluster_id + return cluster_id -def get_server_with_vns_id_from_db(): - vns_id = get_pref_vns_id() +def get_server_with_cluster_id_from_db(): + cluster_id = get_pref_cluster_id() temp_dir= expanduser("~") - 
file_name = '%s/server_with_vns_id_from_db.json' %(temp_dir) - subprocess.call('server-manager show --detail server --vns_id %s \ + file_name = '%s/server_with_cluster_id_from_db.json' %(temp_dir) + subprocess.call('server-manager show server --cluster_id %s --detail \ | tr -d "\n" \ | sed "s/[^{]*//" \ - > %s' %(vns_id, file_name), shell=True ) + > %s' %(cluster_id, file_name), shell=True ) in_file = open( file_name, 'r' ) @@ -392,29 +458,29 @@ def get_server_with_vns_id_from_db(): return server_dict -def get_vns_with_vns_id_from_db(): - vns_id = get_user_vns_id() - if not vns_id: +def get_cluster_with_cluster_id_from_db(): + cluster_id = get_user_cluster_id() + if not cluster_id: params=read_ini_file(sys.argv[1:]) - vns_id = params['vns_id'] + cluster_id = params['cluster_id'] - vns_dict = {"vns": []} + cluster_dict = {"cluster": []} temp_dir= expanduser("~") - file_name = '%s/vns.json' %(temp_dir) + file_name = '%s/cluster.json' %(temp_dir) - subprocess.call('server-manager show --detail vns --vns_id %s \ + subprocess.call('server-manager show cluster --cluster_id %s --detail \ | tr -d "\n" \ | sed "s/[^{]*//" \ - > %s' %(vns_id, file_name), shell=True ) + > %s' %(cluster_id, file_name), shell=True ) in_file = open( file_name, 'r' ) in_data = in_file.read() - vns_dict = json.loads(in_data) + cluster_dict = json.loads(in_data) - return vns_dict + return cluster_dict def get_server_with_ip_from_db(ip=None): @@ -429,7 +495,7 @@ def get_server_with_ip_from_db(ip=None): file_name = '%s/server.json' %(temp_dir) - subprocess.call('server-manager show --detail server --ip %s \ + subprocess.call('server-manager show server --ip %s --detail \ | tr -d "\n" \ | sed "s/[^{]*//" \ > %s' %(ip, file_name), shell=True ) @@ -460,10 +526,30 @@ def get_host_roles_from_testbed_py(): return node # end get_host_roles_from_testbed_py +def get_storage_node_config_from_testbed_py(): + testbed = get_testbed() + storage_config = {} + allowed_disk_types = ['disks'] + if not 
testbed.env.has_key('storage_node_config'): + return storage_config + for key in testbed.env.storage_node_config: + node_config_dict = dict(testbed.env.storage_node_config[key]) + ip = getIp(key) + if not storage_config.has_key(ip): + storage_config[ip] = {} + for disk_type in node_config_dict: + if disk_type not in allowed_disk_types: + print ("ERROR: An invalid disk type has been specified in the testbed.py storage node config") + else: + storage_config[ip][disk_type] = node_config_dict[disk_type] + return storage_config +# end get_storage_node_config_from_testbed_py + def update_server_in_db_with_testbed_py(): - vns_id = get_pref_vns_id() + cluster_id = get_pref_cluster_id() node = get_host_roles_from_testbed_py() + storage_config = get_storage_node_config_from_testbed_py() if not node: return u_server_dict = {} @@ -474,11 +560,15 @@ def update_server_in_db_with_testbed_py(): if not server_dict or not server_dict['server']: print ("ERROR: Server with ip %s not present in Server Manager" % key) continue - server_id = server_dict['server'][0]['server_id'] + server_id = server_dict['server'][0]['id'] u_server = {} - u_server['server_id'] = server_id - u_server['vns_id'] = vns_id + u_server['id'] = server_id + u_server['cluster_id'] = cluster_id u_server['roles'] = node[key] + u_server['server_params'] = {} + if key in storage_config: + for disk_type in storage_config[key]: + u_server['server_params'][disk_type] = storage_config[key][disk_type] u_server_dict['server'].append(u_server) temp_dir= expanduser("~") @@ -491,24 +581,24 @@ def update_server_in_db_with_testbed_py(): subprocess.call('server-manager add server -f %s' %(server_file), shell=True ) for u_server in u_server_dict['server']: - subprocess.call('server-manager show --detail server --server_id %s' \ - % u_server['server_id'], shell=True ) -#End update_server_in_db_with_vns_id + subprocess.call('server-manager show server --server_id %s --detail' \ + % u_server['id'], shell=True ) +#End 
update_server_in_db_with_cluster_id -def get_pref_vns_id(): - vns_id = get_user_vns_id() - if not vns_id: +def get_pref_cluster_id(): + cluster_id = get_user_cluster_id() + if not cluster_id: params=read_ini_file(sys.argv[1:]) - vns_id = params['vns_id'] - return vns_id + cluster_id = params['cluster_id'] + return cluster_id def verify_user_input(): params=read_ini_file(sys.argv[1:]) - vns_id = get_user_vns_id() + cluster_id = get_user_cluster_id() - if not params and not vns_id: - sys.exit(" User should either provide --vns_id or config.ini ") + if not params and not cluster_id: + sys.exit(" User should either provide --cluster_id or config.ini ") def get_testbed(): filepath = get_testbed_py(sys.argv[1:]) diff --git a/src/client/smgr_delete.py b/src/client/smgr_delete.py index 7c4f9441..0010f064 100755 --- a/src/client/smgr_delete.py +++ b/src/client/smgr_delete.py @@ -6,7 +6,7 @@ Author : Abhay Joshi Description : This program is a simple cli interface to delete server manager configuration objects. - Objects can be vns, cluster, server, or image. + Objects can be cluster, server, or image. 
""" import argparse import pdb @@ -28,12 +28,7 @@ def parse_arguments(): prog="server-manager delete" ) # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-c", help=("Server manager client config file " " (default - %s)" %( smgr_client_def._DEF_SMGR_CFG_FILE))) @@ -51,31 +46,21 @@ def parse_arguments(): help=("mac address for server to be deleted")) group.add_argument("--ip", help=("ip address for server to be deleted")) - group.add_argument("--vns_id", - help=("vns id for server(s) to be deleted")) group.add_argument("--cluster_id", help=("cluster id for server(s) to be deleted")) - group.add_argument("--rack_id", - help=("rack id for server(s) to be deleted")) - group.add_argument("--pod_id", - help=("pod id for server(s) to be deleted")) + group.add_argument("--tag", + help=("tag values for the server to be deleted " + "in t1=v1,t2=v2,... 
format")) parser_server.set_defaults(func=delete_server) - # Subparser for vns delete - parser_vns = subparsers.add_parser( - "vns", help='Delete vns') - parser_vns.add_argument("vns_id", - help=("vns id for vns to be deleted")) - parser_vns.add_argument("--force", "-f", action="store_true", - help=("optional parameter to indicate ," - "if vns association to be removed from server")) - parser_vns.set_defaults(func=delete_vns) - # Subparser for cluster delete parser_cluster = subparsers.add_parser( "cluster", help='Delete cluster') parser_cluster.add_argument("cluster_id", - help=("cluster id for cluster to be deleted")) + help=("cluster id for vns to be deleted")) + parser_cluster.add_argument("--force", "-f", action="store_true", + help=("optional parameter to indicate ," + "if cluster association to be removed from server")) parser_cluster.set_defaults(func=delete_cluster) # Subparser for image delete @@ -110,45 +95,30 @@ def delete_server(args): rest_api_params = {} rest_api_params['object'] = 'server' if args.server_id: - rest_api_params['match_key'] = 'server_id' + rest_api_params['match_key'] = 'id' rest_api_params['match_value'] = args.server_id elif args.mac: - rest_api_params['match_key'] = 'mac' + rest_api_params['match_key'] = 'mac_address' rest_api_params['match_value'] = args.mac elif args.ip: - rest_api_params['match_key'] = 'ip' + rest_api_params['match_key'] = 'ip_address' rest_api_params['match_value'] = args.ip - elif args.vns_id: - rest_api_params['match_key'] = 'vns_id' - rest_api_params['match_value'] = args.vns_id elif args.cluster_id: rest_api_params['match_key'] = 'cluster_id' rest_api_params['match_value'] = args.cluster_id - elif args.rack_id: - rest_api_params['match_key'] = 'rack_id' - rest_api_params['match_value'] = args.rack_id - elif args.pod_id: - rest_api_params['match_key'] = 'pod_id' - rest_api_params['match_value'] = args.pod_id + elif args.tag: + rest_api_params['match_key'] = 'tag' + rest_api_params['match_value'] = args.tag 
else: rest_api_params['match_key'] = '' rest_api_params['match_value'] = '' return rest_api_params #end def delete_server -def delete_vns(args): - rest_api_params = { - 'object' : 'vns', - 'match_key' : 'vns_id', - 'match_value' : args.vns_id - } - return rest_api_params -#end def delete_vns - def delete_cluster(args): rest_api_params = { 'object' : 'cluster', - 'match_key' : 'cluster_id', + 'match_key' : 'id', 'match_value' : args.cluster_id } return rest_api_params @@ -157,7 +127,7 @@ def delete_cluster(args): def delete_image(args): rest_api_params = { 'object' : 'image', - 'match_key' : 'image_id', + 'match_key' : 'id', 'match_value' : args.image_id } return rest_api_params @@ -166,29 +136,23 @@ def delete_image(args): def delete_config(args_str=None): parser = parse_arguments() args = parser.parse_args(args_str) - if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT + if args.config_file: + config_file = args.config_file else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port + config_file = smgr_client_def._DEF_SMGR_CFG_FILE + # end args.config_file + try: + config = ConfigParser.SafeConfigParser() + config.read([config_file]) + smgr_config = dict(config.items("SERVER-MANAGER")) + smgr_ip = smgr_config.get("listen_ip_addr", None) + if not smgr_ip: + sys.exit(("listen_ip_addr missing in config file" + "%s" %config_file)) + smgr_port = 
smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except rest_api_params = args.func(args) force = False if 'force' in args: diff --git a/src/client/smgr_modify.py b/src/client/smgr_modify.py deleted file mode 100755 index d1419817..00000000 --- a/src/client/smgr_modify.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/python - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -""" - Name : smgr_modify.py - Author : Abhay Joshi - Description : This program is a simple cli interface to - modify server manager configuration objects. - Objects can be vns, cluster, server, or image. -""" -import argparse -import pdb -import sys -import pycurl -from StringIO import StringIO -import json -try: - from collections import OrderedDict -except ImportError: - from ordereddict import OrderedDict -import ConfigParser -import smgr_client_def - -# Below array of dictionary's is used by create_payload -# function to create payload when user choses to input -# object parameter values manually instead of providing a -# json file. 
-object_dict = { - "vns" : OrderedDict ([ - ("vns_id", "vns_id for vns to be modfied"), - ("email", "Email id for notifications"), - ("vns_params", OrderedDict ([ - ("router_asn", "Router asn value"), - ("mask", "Subnet mask"), - ("gway", "Default gateway for servers in this cluster"), - ("passwd", "Default password for servers in this cluster"), - ("domain", "Default domain for servers in this cluster"), - ("database_dir", "home directory for cassandra"), - ("db_initial_token", "initial database token"), - ("openstack_mgmt_ip", "openstack management ip"), - ("use_certs", "whether to use certificates for auth (True/False)"), - ("multi_tenancy", "Openstack multitenancy (True/False)"), - ("service_token", "Service token for openstack access"), - ("ks_user", "Keystone user name"), - ("ks_passwd", "keystone password"), - ("ks_tenant", "keystone tenant name"), - ("openstack_passwd", "open stack password"), - ("analytics_data_ttl", "analytics data TTL"), - ("osd_bootstrap_key", "OSD Bootstrap Key"), - ("admin_key", "Admin Authentication Key"), - ("storage_mon_secret", "Storage Monitor Secret Key")])) - ]), - "server": OrderedDict ([ - ("server_id", "server id of the server to be modified"), - ("ip", "server ip address"), - ("roles", "comma-separated list of roles for this server"), - ("server_params", OrderedDict([ - ("ifname", "Ethernet Interface name"), - ("compute_non_mgmt_ip", "compute node non mgmt ip (default none)"), - ("compute_non_mgmt_gway", "compute node non mgmt gway (default none)"), - ("disks", "Storage OSDs")])), - ("mask", "subnet mask (default use value from vns table)"), - ("gway", "gateway (default use value from vns table)"), - ("domain", "domain name (default use value from vns table)"), - ("passwd", "root password (default use value from vns table)"), - ("email", "email id for notifications (default use value from vns table)"), - ]), - "image" : OrderedDict ([ - ("image_id", "Image id of image to be modified"), - ("image_version", "Specify version 
for this image"), - ]), - "cluster" : OrderedDict ([ - ("cluster_id", "cluster id of cluster to be modified"), - ]) -} - -def parse_arguments(args_str=None): - # Process the arguments - if __name__ == "__main__": - parser = argparse.ArgumentParser( - description='''Modify a Server Manager object''' - ) - else: - parser = argparse.ArgumentParser( - description='''Modify a Server Manager object''', - prog="server-manager modify" - ) - # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", - help=("Server manager client config file " - " (default - %s)" %( - smgr_client_def._DEF_SMGR_CFG_FILE))) - subparsers = parser.add_subparsers(title='objects', - description='valid objects', - help='help for object', - dest='object') - - # Subparser for server modify - parser_server = subparsers.add_parser( - "server",help='Modify server') - parser_server.add_argument( - "--file_name", "-f", - help="json file containing server param values") - - # Subparser for vns modify - parser_vns = subparsers.add_parser( - "vns", help='Modify vns') - parser_vns.add_argument( - "--file_name", "-f", - help="json file containing vns param values") - - # Subparser for cluster modify - parser_cluster = subparsers.add_parser( - "cluster", help='Modify cluster') - parser_cluster.add_argument( - "--file_name", "-f", - help="json file containing cluster param values") - - # Subparser for image modify - parser_image = subparsers.add_parser( - "image", help='Modify image') - parser_image.add_argument( - "--file_name", "-f", - help="json file containing image param values") - - args = parser.parse_args(args_str) - return args - -def send_REST_request(ip, port, object, payload): - try: - response = StringIO() - headers = ["Content-Type:application/json"] - url = "http://%s:%s/%s" %( - ip, port, object) - conn = pycurl.Curl() - 
conn.setopt(pycurl.URL, url) - conn.setopt(pycurl.HTTPHEADER, headers) - conn.setopt(pycurl.POST, 1) - conn.setopt(pycurl.POSTFIELDS, '%s'%json.dumps(payload)) - conn.setopt(pycurl.WRITEFUNCTION, response.write) - conn.perform() - return response.getvalue() - except: - return None - -# Function to accept parameters from user and then build payload to be -# sent with REST API request for creating the object. -def create_payload(object): - payload = {} - objects = [] - while True: - temp_dict = {} - fields_dict = object_dict[object] - for key in fields_dict: - value = fields_dict[key] - if (key != (object+"_params")): - msg = key - if value: - msg += " (%s) " %(value) - msg += ": " - user_input = raw_input(msg) - if user_input: - # Special case for roles - - # store as a list - if key == "roles": - temp_dict[key] = user_input.strip().split(",") - else: - temp_dict[key] = user_input - else: - param_dict = {} - for param in value: - pvalue = value[param] - msg = param - if pvalue: - msg += " (%s) " %(pvalue) - msg += ": " - user_input = "" - if param == 'disks' and 'storage' in temp_dict["roles"]: - disks = raw_input(msg) - if disks: - disk_list = disks.split(',') - user_input = [str(d) for d in disk_list] - else: - user_input = raw_input(msg) - if user_input: - param_dict[param] = user_input - if param_dict: - temp_dict[key] = param_dict - # End if (key != (object+"_params")) - # End for key, value in fields_dict - objects.append(temp_dict) - choice = raw_input("More %s(s) to input? 
(y/N)" %(object)) - if ((not choice) or - (choice.lower() != "y")): - break; - # End while True - payload[object] = objects - return payload -# End create_payload - -def modify_config(args_str=None): - args = parse_arguments(args_str) - if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT - else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port - object = args.object - if args.file_name: - payload = json.load(open(args.file_name)) - else: - # Accept parameters and construct json. 
- payload = create_payload(object) - - resp = send_REST_request(smgr_ip, smgr_port, - object, payload) - print resp -# End of modify_config - -if __name__ == "__main__": - import cgitb - cgitb.enable(format='text') - - modify_config(sys.argv[1:]) -# End if __name__ diff --git a/src/client/smgr_provision_server.py b/src/client/smgr_provision_server.py index 2b2d4d70..ea731d6c 100755 --- a/src/client/smgr_provision_server.py +++ b/src/client/smgr_provision_server.py @@ -38,12 +38,7 @@ def parse_arguments(args_str=None): prog="server-manager provision" ) # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-c", help=("Server manager client config file " " (default - %s)" %( smgr_client_def._DEF_SMGR_CFG_FILE))) @@ -53,20 +48,19 @@ def parse_arguments(args_str=None): group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--server_id", help=("server id for the server to be provisioned")) - group.add_argument("--vns_id", - help=("vns id for the server(s) to be provisioned")) group.add_argument("--cluster_id", help=("cluster id for the server(s) to be provisioned")) - group.add_argument("--rack_id", - help=("rack id for the server(s) to be provisioned")) - group.add_argument("--pod_id", - help=("pod id for the server(s) to be provisioned")) + group.add_argument("--tag", + help=("tag values for the servers to be provisioned")) group.add_argument("--provision_params_file", "-f", help=("Optional json file containing parameters " " for provisioning server")) group.add_argument("--interactive", "-I", action="store_true", help=("flag that user wants to enter the server " " parameters for provisioning manually")) + parser.add_argument("--no_confirm", "-F", action="store_true", + help=("flag to bypass confirmation message, " + "default = do 
not bypass")) args = parser.parse_args(args_str) return args @@ -81,7 +75,9 @@ def get_provision_params(): ("control" , " (Comma separated list of server names for this role) : "), ("collector" , " (Comma separated list of server names for this role) : "), ("webui" , " (Comma separated list of server names for this role) : "), - ("compute" , " (Comma separated list of server names for this role) : ") + ("compute" , " (Comma separated list of server names for this role) : "), + ("storage-compute", " (Comma separated list of server names for this role) : "), + ("storage-master", " (Comma separated list of server names for this role) : ") ]) # Accept all the role definitions print "****** List of role definitions ******" @@ -115,48 +111,36 @@ def send_REST_request(ip, port, payload): def provision_server(args_str=None): args = parse_arguments(args_str) - if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT + if args.config_file: + config_file = args.config_file else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port + config_file = smgr_client_def._DEF_SMGR_CFG_FILE + # end args.config_file + try: + config = ConfigParser.SafeConfigParser() + config.read([config_file]) + smgr_config = dict(config.items("SERVER-MANAGER")) + smgr_ip = smgr_config.get("listen_ip_addr", None) + if not smgr_ip: + sys.exit(("listen_ip_addr missing in config file" + "%s" 
%config_file)) + smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except provision_params = {} match_key = None match_param = None if args.server_id: - match_key='server_id' + match_key='id' match_value = args.server_id - elif args.vns_id: - match_key='vns_id' - match_value = args.vns_id elif args.cluster_id: match_key='cluster_id' match_value = args.cluster_id - elif args.rack_id: - match_key='rack_id' - match_value = args.rack_id - elif args.pod_id: - match_key='pod_id' - match_value = args.pod_id + elif args.tag: + match_key='tag' + match_value = args.tag elif args.interactive: provision_params = get_provision_params() elif args.provision_params_file: @@ -170,7 +154,19 @@ def provision_server(args_str=None): if match_key: payload[match_key] = match_value if provision_params: - payload['provision_params'] = provision_params + payload['provision_parameters'] = provision_params + + if (not args.no_confirm): + if match_key: + msg = "Provision servers (%s:%s) with %s? (y/N) :" %( + match_key, match_value, args.package_image_id) + else: + msg = "Provision servers with %s? 
(y/N) :" %( + args.package_image_id) + user_input = raw_input(msg).lower() + if user_input not in ["y", "yes"]: + sys.exit() + # end if resp = send_REST_request(smgr_ip, smgr_port, payload) diff --git a/src/client/smgr_reimage_server.py b/src/client/smgr_reimage_server.py index 9a6024af..e5ea1d8a 100755 --- a/src/client/smgr_reimage_server.py +++ b/src/client/smgr_reimage_server.py @@ -38,12 +38,7 @@ def parse_arguments(args_str=None): prog="server-manager reimage" ) # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-c", help=("Server manager client config file " " (default - %s)" %( smgr_client_def._DEF_SMGR_CFG_FILE))) @@ -59,14 +54,14 @@ def parse_arguments(args_str=None): group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--server_id", help=("server id for the server to be reimaged")) - group.add_argument("--vns_id", - help=("vns id for the server(s) to be reimaged")) group.add_argument("--cluster_id", help=("cluster id for the server(s) to be reimaged")) - group.add_argument("--rack_id", - help=("rack id for the server(s) to be reimaged")) - group.add_argument("--pod_id", - help=("pod id for the server(s) to be reimaged")) + group.add_argument("--tag", + help=("tag values for the servers to be reimaged" + "in t1=v1,t2=v2,... 
format")) + parser.add_argument("--no_confirm", "-F", action="store_true", + help=("flag to bypass confirmation message, " + "default = do not bypass")) args = parser.parse_args(args_str) return args # end parse arguments @@ -90,47 +85,35 @@ def send_REST_request(ip, port, payload): def reimage_server(args_str=None): args = parse_arguments(args_str) - if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT + if args.config_file: + config_file = args.config_file else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port + config_file = smgr_client_def._DEF_SMGR_CFG_FILE + # end args.config_file + try: + config = ConfigParser.SafeConfigParser() + config.read([config_file]) + smgr_config = dict(config.items("SERVER-MANAGER")) + smgr_ip = smgr_config.get("listen_ip_addr", None) + if not smgr_ip: + sys.exit(("listen_ip_addr missing in config file" + "%s" %config_file)) + smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except match_key = None match_value = None if args.server_id: - match_key='server_id' + match_key='id' match_value = args.server_id - elif args.vns_id: - match_key='vns_id' - match_value = args.vns_id elif args.cluster_id: match_key='cluster_id' match_value = args.cluster_id - elif args.rack_id: - match_key='rack_id' - match_value = args.rack_id - 
elif args.pod_id: - match_key='pod_id' - match_value = args.pod_id + elif args.tag: + match_key='tag' + match_value = args.tag else: pass @@ -141,6 +124,14 @@ def reimage_server(args_str=None): payload['no_reboot'] = "y" if match_key: payload[match_key] = match_value + + if (not args.no_confirm): + msg = "Reimage servers (%s:%s) with %s? (y/N) :" %( + match_key, match_value, args.base_image_id) + user_input = raw_input(msg).lower() + if user_input not in ["y", "yes"]: + sys.exit() + # end if resp = send_REST_request(smgr_ip, smgr_port, payload) diff --git a/src/client/smgr_restart_server.py b/src/client/smgr_restart_server.py index 26714ad4..13527efe 100755 --- a/src/client/smgr_restart_server.py +++ b/src/client/smgr_restart_server.py @@ -38,29 +38,24 @@ def parse_arguments(args_str=None): prog="server-manager restart" ) # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-c", help=("Server manager client config file " " (default - %s)" %( smgr_client_def._DEF_SMGR_CFG_FILE))) group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--server_id", - help=("server id for the server to be provisioned")) + help=("server id for the server to be restarted")) - group.add_argument("--vns_id", - help=("vns id for the server(s) to be provisioned")) group.add_argument("--cluster_id", - help=("cluster id for the server(s) to be provisioned")) - group.add_argument("--rack_id", - help=("rack id for the server(s) to be provisioned")) - group.add_argument("--pod_id", - help=("pod id for the server(s) to be provisioned")) + help=("cluster id for the server(s) to be restarted")) + group.add_argument("--tag", + help=("tag values for the servers to be restarted " + "in t1=v1,t2=v2,... 
format")) parser.add_argument("--net_boot", "-n", action="store_true", help=("optional parameter to indicate" " if server should be netbooted.")) + parser.add_argument("--no_confirm", "-F", action="store_true", + help=("flag to bypass confirmation message, " + "default = do not bypass")) args = parser.parse_args(args_str) return args # end def parse_arguments @@ -85,46 +80,34 @@ def send_REST_request(ip, port, payload): def restart_server(args_str=None): args = parse_arguments(args_str) - if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT + if args.config_file: + config_file = args.config_file else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port + config_file = smgr_client_def._DEF_SMGR_CFG_FILE + # end args.config_file + try: + config = ConfigParser.SafeConfigParser() + config.read([config_file]) + smgr_config = dict(config.items("SERVER-MANAGER")) + smgr_ip = smgr_config.get("listen_ip_addr", None) + if not smgr_ip: + sys.exit(("listen_ip_addr missing in config file" + "%s" %config_file)) + smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except match_key = None match_param = None if args.server_id: - match_key='server_id' + match_key='id' match_value = args.server_id - elif args.vns_id: - match_key='vns_id' - match_value = args.vns_id elif 
args.cluster_id: match_key='cluster_id' match_value = args.cluster_id - elif args.rack_id: - match_key='rack_id' - match_value = args.rack_id - elif args.pod_id: - match_key='pod_id' - match_value = args.pod_id + elif args.tag: + match_key='tag' + match_value = args.tag else: pass @@ -133,6 +116,14 @@ def restart_server(args_str=None): payload[match_key] = match_value if (args.net_boot): payload['net_boot'] = "y" + + if (not args.no_confirm): + msg = "Restart servers (%s:%s)? (y/N) :" %( + match_key, match_value) + user_input = raw_input(msg).lower() + if user_input not in ["y", "yes"]: + sys.exit() + # end if resp = send_REST_request(smgr_ip, smgr_port, payload) diff --git a/src/client/smgr_show.py b/src/client/smgr_show.py index 74f92949..2e1f66de 100755 --- a/src/client/smgr_show.py +++ b/src/client/smgr_show.py @@ -6,7 +6,7 @@ Author : Abhay Joshi Description : This program is a simple cli interface to get server manager configuration objects. - Objects can be vns, cluster, server, or image. + Objects can be cluster, server, or image. An optional parameter details is used to indicate if user wants to fetch details of the object. 
""" @@ -32,17 +32,10 @@ def parse_arguments(): prog="server-manager show" ) # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-c", help=("Server manager client config file " " (default - %s)" %( smgr_client_def._DEF_SMGR_CFG_FILE))) - parser.add_argument("--detail", "-d", action='store_true', - help="Flag to indicate if details are requested") subparsers = parser.add_subparsers(title='objects', description='valid objects', help='help for object') @@ -57,28 +50,27 @@ def parse_arguments(): help=("mac address for server")) group.add_argument("--ip", help=("ip address for server")) - group.add_argument("--vns_id", - help=("vns id for server(s)")) group.add_argument("--cluster_id", help=("cluster id for server(s)")) - group.add_argument("--rack_id", - help=("rack id for server(s)")) - group.add_argument("--pod_id", - help=("pod id for server(s)")) + group.add_argument("--tag", + help=("tag values for the server" + "in t1=v1,t2=v2,... 
format")) + group.add_argument("--discovered", + help=("flag to get list of " + "newly discovered server(s)")) + parser_server.add_argument( + "--detail", "-d", action='store_true', + help="Flag to indicate if details are requested") parser_server.set_defaults(func=show_server) - # Subparser for vns show - parser_vns = subparsers.add_parser( - "vns", help='Show vns') - parser_vns.add_argument("--vns_id", - help=("vns id for vns")) - parser_vns.set_defaults(func=show_vns) - # Subparser for cluster show parser_cluster = subparsers.add_parser( "cluster", help='Show cluster') parser_cluster.add_argument("--cluster_id", help=("cluster id for cluster")) + parser_cluster.add_argument( + "--detail", "-d", action='store_true', + help="Flag to indicate if details are requested") parser_cluster.set_defaults(func=show_cluster) # Subparser for image show @@ -86,12 +78,23 @@ def parse_arguments(): "image", help='Show image') parser_image.add_argument("--image_id", help=("image id for image")) + parser_image.add_argument( + "--detail", "-d", action='store_true', + help="Flag to indicate if details are requested") parser_image.set_defaults(func=show_image) # Subparser for all show parser_all = subparsers.add_parser( - "all", help='Show all configuration (servers,vns,clusters, images)') + "all", help='Show all configuration (servers, clusters, images, tags)') + parser_all.add_argument( + "--detail", "-d", action='store_true', + help="Flag to indicate if details are requested") parser_all.set_defaults(func=show_all) + + # Subparser for tags show + parser_tag = subparsers.add_parser( + "tag", help='Show list of server tags') + parser_tag.set_defaults(func=show_tag) return parser # end def parse_arguments @@ -124,50 +127,32 @@ def show_server(args): rest_api_params = {} rest_api_params['object'] = 'server' if args.server_id: - rest_api_params['match_key'] = 'server_id' + rest_api_params['match_key'] = 'id' rest_api_params['match_value'] = args.server_id elif args.mac: - 
rest_api_params['match_key'] = 'mac' + rest_api_params['match_key'] = 'mac_address' rest_api_params['match_value'] = args.mac elif args.ip: - rest_api_params['match_key'] = 'ip' + rest_api_params['match_key'] = 'ip_address' rest_api_params['match_value'] = args.ip - elif args.vns_id: - rest_api_params['match_key'] = 'vns_id' - rest_api_params['match_value'] = args.vns_id elif args.cluster_id: rest_api_params['match_key'] = 'cluster_id' rest_api_params['match_value'] = args.cluster_id - elif args.rack_id: - rest_api_params['match_key'] = 'rack_id' - rest_api_params['match_value'] = args.rack_id - elif args.pod_id: - rest_api_params['match_key'] = 'pod_id' - rest_api_params['match_value'] = args.pod_id + elif args.tag: + rest_api_params['match_key'] = 'tag' + rest_api_params['match_value'] = args.tag + elif args.discovered: + rest_api_params['match_key'] = 'discovered' + rest_api_params['match_value'] = args.discovered else: rest_api_params['match_key'] = None rest_api_params['match_value'] = None return rest_api_params #end def show_server -def show_vns(args): - if args.vns_id: - match_key = 'vns_id' - match_value = args.vns_id - else: - match_key = None - match_value = None - rest_api_params = { - 'object' : 'vns', - 'match_key' : match_key, - 'match_value' : match_value - } - return rest_api_params -#end def show_vns - def show_cluster(args): if args.cluster_id: - match_key = 'cluster_id' + match_key = 'id' match_value = args.cluster_id else: match_key = None @@ -182,7 +167,7 @@ def show_cluster(args): def show_image(args): if args.image_id: - match_key = 'image_id' + match_key = 'id' match_value = args.image_id else: match_key = None @@ -204,38 +189,45 @@ def show_all(args): return rest_api_params #end def show_all +def show_tag(args): + rest_api_params = { + 'object' : 'tag', + 'match_key' : None, + 'match_value' : None + } + return rest_api_params +#end def show_all + def show_config(args_str=None): parser = parse_arguments() args = parser.parse_args(args_str) 
- if args.ip_port: - smgr_ip, smgr_port = args.ip_port.split(":") - if not smgr_port: - smgr_port = smgr_client_def._DEF_SMGR_PORT + if args.config_file: + config_file = args.config_file + else: + config_file = smgr_client_def._DEF_SMGR_CFG_FILE + # end args.config_file + if hasattr(args, 'detail'): + detail = args.detail else: - if args.config_file: - config_file = args.config_file - else: - config_file = smgr_client_def._DEF_SMGR_CFG_FILE - # end args.config_file - try: - config = ConfigParser.SafeConfigParser() - config.read([config_file]) - smgr_config = dict(config.items("SERVER-MANAGER")) - smgr_ip = smgr_config.get("listen_ip_addr", None) - if not smgr_ip: - sys.exit(("listen_ip_addr missing in config file" - "%s" %config_file)) - smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) - except: - sys.exit("Error reading config file %s" %config_file) - # end except - # end else args.ip_port + detail = None + try: + config = ConfigParser.SafeConfigParser() + config.read([config_file]) + smgr_config = dict(config.items("SERVER-MANAGER")) + smgr_ip = smgr_config.get("listen_ip_addr", None) + if not smgr_ip: + sys.exit(("listen_ip_addr missing in config file" + "%s" %config_file)) + smgr_port = smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except rest_api_params = args.func(args) resp = send_REST_request(smgr_ip, smgr_port, rest_api_params['object'], rest_api_params['match_key'], rest_api_params['match_value'], - args.detail) + detail) smgr_client_def.print_rest_response(resp) # End of show_config diff --git a/src/client/smgr_status.py b/src/client/smgr_status.py index 21ae2f28..583547c4 100755 --- a/src/client/smgr_status.py +++ b/src/client/smgr_status.py @@ -3,63 +3,65 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 """ Name : smgr_status.py - Author : Prasad Miriyala & Bharat Putta + Author : Abhay Joshi Description : This program is a simple cli 
interface to - get status of a server or all the servers in a VNS. + get server manager configuration objects. + Objects can be cluster, server, or image. + An optional parameter details is used to indicate if user + wants to fetch details of the object. """ import argparse -import cgitb +import pdb import sys import pycurl from StringIO import StringIO import ConfigParser -import json import smgr_client_def +import json + def parse_arguments(): # Process the arguments if __name__ == "__main__": parser = argparse.ArgumentParser( - description='''Show a Server Manager object''' + description='''Show a Server's Status''' ) else: parser = argparse.ArgumentParser( - description='''Show a Server Manager object''', + description='''Show a Servers status''', prog="server-manager status" ) # end else - group1 = parser.add_mutually_exclusive_group() - group1.add_argument("--ip_port", "-i", - help=("ip addr & port of server manager " - "[:] format, default port " - " 9001")) - group1.add_argument("--config_file", "-c", + parser.add_argument("--config_file", "-c", help=("Server manager client config file " " (default - %s)" %( smgr_client_def._DEF_SMGR_CFG_FILE))) - parser.add_argument("--detail", "-d", action='store_true', - help="Flag to indicate if details are requested") subparsers = parser.add_subparsers(title='objects', description='valid objects', help='help for object') - # Subparser for server status + # Subparser for server show parser_server = subparsers.add_parser( - "server",help='Status server') + "server",help='Show server status') group = parser_server.add_mutually_exclusive_group() group.add_argument("--server_id", help=("server id for server")) - parser_server.set_defaults(get_rest_params=server_rest_params) - parser_server.set_defaults(get_status=get_server_status) - - - # Subparser for vns show - parser_vns = subparsers.add_parser( - "vns", help='Status vns') - parser_vns.add_argument("--vns_id", - help=("vns id for vns")) - 
def set_server_status(args):
    """Translate parsed status-subcommand arguments into REST query params.

    At most one selector is supplied (argparse enforces mutual exclusion);
    the first truthy one in precedence order wins.  With no selector the
    match key/value stay None, which asks the server for every record.
    """
    # (CLI attribute, REST match key) pairs in the original precedence order.
    selectors = (
        ('server_id', 'id'),
        ('mac', 'mac_address'),
        ('ip', 'ip_address'),
        ('cluster_id', 'cluster_id'),
        ('tag', 'tag'),
        ('discovered', 'discovered'),
    )
    rest_api_params = {
        'object': 'server',
        'match_key': None,
        'match_value': None,
    }
    for attr, key in selectors:
        value = getattr(args, attr)
        if value:
            rest_api_params['match_key'] = key
            rest_api_params['match_value'] = value
            break
    return rest_api_params
#end def set_server_status
def show_server_status(args_str=None):
    """Parse status-subcommand arguments, locate the server-manager endpoint
    from the client config file and print the status REST response.

    The REST call is always made against the "server_status" object; the
    subcommand's ``func`` callback only supplies the match key/value filter.
    """
    parser = parse_arguments()
    args = parser.parse_args(args_str)
    if args.config_file:
        config_file = args.config_file
    else:
        config_file = smgr_client_def._DEF_SMGR_CFG_FILE
    # end args.config_file
    # Not every subparser defines --detail; default to None when absent.
    detail = getattr(args, 'detail', None)
    try:
        config = ConfigParser.SafeConfigParser()
        config.read([config_file])
        smgr_config = dict(config.items("SERVER-MANAGER"))
        smgr_ip = smgr_config.get("listen_ip_addr", None)
        smgr_port = smgr_config.get("listen_port",
                                    smgr_client_def._DEF_SMGR_PORT)
    except Exception:
        # Narrowed from a bare "except:" so sys.exit (SystemExit) from the
        # missing-address check below cannot be masked by this handler.
        sys.exit("Error reading config file %s" % config_file)
    # end except
    if not smgr_ip:
        sys.exit("listen_ip_addr missing in config file %s" % config_file)
    rest_api_params = args.func(args)
    resp = send_REST_request(smgr_ip, smgr_port,
                             "server_status",
                             rest_api_params['match_key'],
                             rest_api_params['match_value'],
                             detail)
    smgr_client_def.print_rest_response(resp)
# End of show_server_status

if __name__ == "__main__":
    import cgitb
    cgitb.enable(format='text')
    # Bug fix: this module defines show_server_status, not show_config;
    # the original call raised NameError at startup.
    show_server_status(sys.argv[1:])
# End if __name__
smgr_config.get("listen_port", smgr_client_def._DEF_SMGR_PORT) + except: + sys.exit("Error reading config file %s" %config_file) + # end except image_id = args.image_id image_version = args.image_version image_type = args.image_type payload = { - 'image_id' : image_id, - 'image_version' : image_version, - 'image_type' : image_type + 'id' : image_id, + 'version' : image_version, + 'type' : image_type } file_name = args.file_name diff --git a/src/client/storage-cluster.json b/src/client/storage-cluster.json new file mode 100644 index 00000000..fe18de58 --- /dev/null +++ b/src/client/storage-cluster.json @@ -0,0 +1,29 @@ +{ + "cluster" : [ + { + "id" : "nitish-cluster", + "parameters" : { + "router_asn": "64512", + "database_dir": "/home/cassandra", + "database_token": "", + "use_certificates": "False", + "multi_tenancy": "False", + "encapsulation_priority": "MPLSoUDP,MPLSoGRE,VXLAN", + "service_token": "contrail123", + "keystone_username": "admin", + "keystone_password": "contrail123", + "keystone_tenant": "admin", + "analytics_data_ttl": "168", + "haproxy": "disable", + "subnet_mask": "255.255.255.240", + "gateway": "10.204.221.46", + "password": "c0ntrail123", + "external_bgp": "", + "domain": "englab.juniper.net", + "storage_mon_secret": "AQBM78tTEMz+GhAA3WiOXQI7UVdIy0YFFuTGdw==", + "osd_bootstrap_key": "AQCq7NFTeJUoBhAAlTVpxwWQJtBej/JDNhT6+Q==", + "admin_key": "AQDIgtNTgPLWARAAK6gs/fj8m88LnY9DwxJdYA==" + } + } + ] +} diff --git a/src/client/storage-server.json b/src/client/storage-server.json index 118bb47b..ec785a24 100644 --- a/src/client/storage-server.json +++ b/src/client/storage-server.json @@ -1,22 +1,19 @@ { "server": [ { - "server_id": "nitish-test-server", - "mac": "52:54:00:30:FB:AB", - "ip": "192.168.121.101", - "server_params" : { - "ifname": "eth1", - "compute_non_mgmt_ip": "", + "id": "nitish-test-server", + "mac_address": "52:54:00:30:FB:AB", + "ip_address": "192.168.121.101", + "parameters" : { + "interface_name": "eth1", "storage_repo_id": 
"xyz_12", - "compute_non_mgmt_gway": "", "disks": ["/dev/sda","/dev/sdb"] }, - "roles" : ["config","compute","openstack","control","collector","webui","database","storage","storage-mgr"], + "roles" : ["config","compute","openstack","control","collector","webui","database","storage-compute","storage-master"], "cluster_id": "nitish-cluster", - "vns_id": "nitish-test-vns", - "mask": "255.255.255.0", - "gway": "10.204.221.46", - "passwd": "juniper", + "subnet_mask": "255.255.255.0", + "gateway": "10.204.221.46", + "password": "juniper", "domain": "demo.juniper.net", "power_address": "10.84.6.63" } diff --git a/src/client/storage-vns.json b/src/client/storage-vns.json deleted file mode 100644 index 64921657..00000000 --- a/src/client/storage-vns.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "vns" : [ - { - "vns_id" : "nitish-test-vns", - "vns_params" : { - "router_asn": "64512", - "database_dir": "/home/cassandra", - "db_initial_token": "", - "openstack_mgmt_ip": "", - "use_certs": "False", - "multi_tenancy": "False", - "encap_priority": "MPLSoUDP,MPLSoGRE,VXLAN", - "service_token": "contrail123", - "ks_user": "admin", - "ks_passwd": "contrail123", - "ks_tenant": "admin", - "openstack_passwd": "juniper", - "analytics_data_ttl": "168", - "compute_non_mgmt_ip": "", - "compute_non_mgmt_gway": "", - "haproxy": "disable", - "mask": "255.255.255.240", - "gway": "10.204.221.46", - "passwd": "c0ntrail123", - "ext_bgp": "", - "domain": "englab.juniper.net", - "storage_mon_secret": "AQBM78tTEMz+GhAA3WiOXQI7UVdIy0YFFuTGdw==", - "osd_bootstrap_key": "AQCq7NFTeJUoBhAAlTVpxwWQJtBej/JDNhT6+Q==", - "admin_key": "AQDIgtNTgPLWARAAK6gs/fj8m88LnY9DwxJdYA==" - } - } - ] -} diff --git a/src/client/tag.json b/src/client/tag.json new file mode 100644 index 00000000..e1869c73 --- /dev/null +++ b/src/client/tag.json @@ -0,0 +1,10 @@ +{ + "tag1" : "data-center", + "tag2" : "floor", + "tag3" : "room", + "tag4" : "pod", + "tag5" : "rack", + "tag6" : "user-defined-1", + "tag7" : "user-defined-2" +} + 
diff --git a/src/client/vns-5-node-interface.json b/src/client/vns-5-node-interface.json deleted file mode 100644 index 65f27f12..00000000 --- a/src/client/vns-5-node-interface.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "vns" : [ - { - "vns_id" : "5-node", - "vns_params" : { - "router_asn": "64512", - "database_dir": "/home/cassandra", - "db_initial_token": "", - "openstack_mgmt_ip": "", - "use_certs": "False", - "multi_tenancy": "False", - "encap_priority": "MPLSoUDP,MPLSoGRE,VXLAN", - "service_token": "contrail123", - "ks_user": "admin", - "ks_passwd": "contrail123", - "ks_tenant": "admin", - "openstack_passwd": "contrail123", - "analytics_data_ttl": "168", - "mask": "255.255.255.240", - "gway": "10.204.221.62", - "passwd": "c0ntrail123", - "haproxy": "disable", - "ext_bgp": "", - "domain": "englab.juniper.net" - } - } - ] -} - diff --git a/src/client/vns.json b/src/client/vns.json deleted file mode 100644 index a2a60aac..00000000 --- a/src/client/vns.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "vns" : [ - { - "vns_id" : "demo-vns", - "vns_params" : { - "router_asn": "64512", - "database_dir": "/home/cassandra", - "db_initial_token": "", - "openstack_mgmt_ip": "", - "use_certs": "False", - "multi_tenancy": "False", - "encap_priority": "MPLSoUDP,MPLSoGRE,VXLAN", - "service_token": "contrail123", - "ks_user": "admin", - "ks_passwd": "contrail123", - "ks_tenant": "admin", - "openstack_passwd": "juniper", - "analytics_data_ttl": "168", - "compute_non_mgmt_ip": "", - "compute_non_mgmt_gway": "", - "haproxy": "disable", - "mask": "255.255.255.240", - "gway": "10.204.221.46", - "passwd": "c0ntrail123", - "ext_bgp": "", - "domain": "englab.juniper.net" - } - } - ] -} - diff --git a/src/cobbler/dhcp.template b/src/cobbler/dhcp.template index c67cdb2f..e447cd83 100644 --- a/src/cobbler/dhcp.template +++ b/src/cobbler/dhcp.template @@ -19,10 +19,10 @@ set vendorclass = option vendor-class-identifier; subnet 10.204.217.0 netmask 255.255.255.0 { option routers 10.204.217.254; 
option subnet-mask 255.255.255.0; - option domain-name-servers 10.204.208.221, 8.8.8.8; + option domain-name-servers $next_server, 8.8.8.8; option domain-search "englab.juniper.net", "juniper.net"; option domain-name "englab.juniper.net" ; - option ntp-servers 172.17.28.5 ; + option ntp-servers $next_server ; default-lease-time 21600; max-lease-time 43200; next-server $next_server; diff --git a/src/cobbler/dhcp.template.u b/src/cobbler/dhcp.template.u index 7b22489d..80000af5 100644 --- a/src/cobbler/dhcp.template.u +++ b/src/cobbler/dhcp.template.u @@ -19,10 +19,10 @@ set vendorclass = option vendor-class-identifier; subnet 10.204.217.0 netmask 255.255.255.0 { option routers 10.204.217.254; option subnet-mask 255.255.255.0; - option domain-name-servers 10.204.208.221, 8.8.8.8; + option domain-name-servers $next_server, 8.8.8.8; option domain-search "englab.juniper.net", "juniper.net"; option domain-name "englab.juniper.net" ; - option ntp-servers 172.17.28.5 ; + option ntp-servers $next_server ; default-lease-time 21600; max-lease-time 43200; next-server $next_server; @@ -31,70 +31,37 @@ subnet 10.204.217.0 netmask 255.255.255.0 { on commit { set clip = binary-to-ascii(10, 8, ".", leased-address); set clhw = binary-to-ascii(16, 8, ":", substring(hardware, 1, 6)); - execute("/usr/bin/server_manager/smgr_dhcp_event.py", "commit", clip, clhw); + execute("/opt/contrail/server_manager/smgr_dhcp_event.py", "commit", clip, clhw); set ClientHost = pick-first-value(host-decl-name, option fqdn.hostname, option host-name, "none"); - execute("/usr/bin/server_manager/smgr_dhcp_event.py", "commit", clip, clhw, ClientHost); + execute("/opt/contrail/server_manager/smgr_dhcp_event.py", "commit", clip, clhw, ClientHost); } on release { set clip = binary-to-ascii(10, 8, ".", leased-address); set clhw = binary-to-ascii(16, 8, ":", substring(hardware, 1, 6)); - execute("/usr/bin/server_manager/smgr_dhcp_event.py", "release", clip, clhw); + 
execute("/opt/contrail/server_manager/smgr_dhcp_event.py", "release", clip, clhw); set ClientHost = pick-first-value(host-decl-name, option fqdn.hostname, option host-name, "none"); - execute("/usr/bin/server_manager/smgr_dhcp_event.py", "release", clip, clhw, ClientHost); + execute("/opt/contrail/server_manager/smgr_dhcp_event.py", "release", clip, clhw, ClientHost); } on expiry { set clip = binary-to-ascii(10, 8, ".", leased-address); set clhw = binary-to-ascii(16, 8, ":", substring(hardware, 1, 6)); - execute("/usr/bin/server_manager/smgr_dhcp_event.py", "expiry", clip, clhw); + execute("/opt/contrail/server_manager/smgr_dhcp_event.py", "expiry", clip, clhw); set ClientHost = pick-first-value(host-decl-name, option fqdn.hostname, option host-name, "none"); - execute("/usr/bin/server_manager/smgr_dhcp_event.py", "expiry", clip, clhw, ClientHost); + execute("/opt/contrail/server_manager/smgr_dhcp_event.py", "expiry", clip, clhw, ClientHost); } } -host nodeg31.englab.juniper.net { - hardware ethernet 00:25:90:C5:5F:6A; - fixed-address 10.204.217.71; - option host-name "nodeg31"; - filename "/pxelinux.0"; - next-server $next_server; -} - - - -host nodeg34.englab.juniper.net { - hardware ethernet 00:25:90:C5:59:34; - fixed-address 10.204.217.74; - option host-name "nodeg34"; - filename "/pxelinux.0"; - next-server $next_server; -} -host nodeg35.englab.juniper.net { - hardware ethernet 00:25:90:C5:62:54; - fixed-address 10.204.217.75; - option host-name "nodeg35"; - filename "/pxelinux.0"; - next-server $next_server; -} -host nodeg36.englab.juniper.net { - hardware ethernet 00:25:90:C5:5B:A0; - fixed-address 10.204.217.76; - option host-name "nodeg36"; - filename "/pxelinux.0"; - next-server $next_server; -} - - #for dhcp_tag in $dhcp_tags.keys(): ## group could be subnet if your dhcp tags line up with your subnets ## or really any valid dhcpd.conf construct ... 
if you only use the diff --git a/src/cobbler/named.template b/src/cobbler/named.template new file mode 100644 index 00000000..2879f4a3 --- /dev/null +++ b/src/cobbler/named.template @@ -0,0 +1,30 @@ +options { + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + statistics-file "/var/named/data/named_stats.txt"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + allow-recursion { any; }; + recursion yes; +}; + +logging { + channel default_debug { + file "data/named.run"; + severity dynamic; + }; +}; + +#for $zone in $forward_zones +zone "${zone}." { + type master; + file "$zone"; +}; + +#end for +#for $zone, $arpa in $reverse_zones +zone "${arpa}." { + type master; + file "$zone"; +}; + +#end for diff --git a/src/contrail_smgrd b/src/contrail-server-manager similarity index 80% rename from src/contrail_smgrd rename to src/contrail-server-manager index c7d9cd10..824d797f 100755 --- a/src/contrail_smgrd +++ b/src/contrail-server-manager @@ -2,9 +2,9 @@ # # chkconfig: 2345 55 45 # description: The server mangement daemon maintains all the servers belonging to contrail ecosystem. -# processname: contrail_smgrd -# config: /etc/sysconfig/contrail_smgrd -# pidfile: /var/run/contrail_smgrd/contrail_smgrd.pid +# processname: contrail-server-manager +# config: /etc/sysconfig/contrail-server-manager +# pidfile: /var/run/contrail-server-manager/contrail-server-manager.pid #SYSTEMCTL_SKIP_REDIRECT=1 @@ -14,7 +14,7 @@ USER=root RETVAL=0 -prog="contrail_smgrd" +prog="contrail-server-manager" pidfile=${PIDFILE-/var/run/$prog/$prog.pid} if [ -f /etc/sysconfig/$prog ];then @@ -33,7 +33,7 @@ start () { if [ "`stat -c %U $piddir`" != "$USER" ]; then chown $USER $piddir fi - daemon --pidfile ${pidfile} /usr/sbin/contrail_smgrd + daemon --pidfile ${pidfile} /usr/sbin/contrail-server-manager RETVAL=$? 
} stop () { diff --git a/src/contrail_smgr_client.spec b/src/contrail-server-manager-client.spec similarity index 95% rename from src/contrail_smgr_client.spec rename to src/contrail-server-manager-client.spec index 9d7c2b9b..0c4fcf90 100644 --- a/src/contrail_smgr_client.spec +++ b/src/contrail-server-manager-client.spec @@ -77,7 +77,7 @@ cp %{_contrail_smgr_src}utils/create_smgr_db.py %{buildroot}%{_contrailopt}%{_co cp %{_contrail_smgr_src}client/server-manager %{buildroot}%{_contrailopt}%{_contrail_smgr}/client -cp -r %{_contrail_smgr_src}client/smgr_client_config.ini %{buildroot}%{_contrailopt}%{_contrail_smgr}/client +cp -r %{_contrail_smgr_src}client/sm-client-config.ini %{buildroot}%{_contrailopt}%{_contrail_smgr}/client %clean rm -rf %{buildroot} diff --git a/src/contrail_smgr.spec b/src/contrail-server-manager.spec similarity index 88% rename from src/contrail_smgr.spec rename to src/contrail-server-manager.spec index 08dafffc..43300da5 100644 --- a/src/contrail_smgr.spec +++ b/src/contrail-server-manager.spec @@ -21,7 +21,7 @@ %define _pysitepkg /usr/lib/python%{_pyver}/site-packages -Name: contrail_smgr +Name: contrail-server-manager Version: 1.0 Release: 1 Summary: A server manager @@ -110,18 +110,18 @@ service postfix stop service sendmail restart sed -i "s/10.84.51.11/$HOST_IP/" /etc/cobbler/settings -/sbin/chkconfig --add contrail_smgrd -sed -i "s/authn_denyall/authn_testing/g" /etc/cobbler/modules.conf -sed -i "s/127.0.0.1/$HOST_IP/g" /opt/contrail/server_manager/smgr_config.ini +/sbin/chkconfig --add contrail-server-manager +sed -i "s/module = authn_.*/module = authn_configfile/g" /etc/cobbler/modules.conf +sed -i "s/127.0.0.1/$HOST_IP/g" /opt/contrail/server_manager/sm-config.ini chkconfig httpd on chkconfig puppetmaster on -chkconfig contrail_smgrd on +chkconfig contrail-server-manager on chkconfig puppet on -#service contrail_smgrd restart +#service contrail-server-manager restart %build %install @@ -148,11 +148,14 @@ cp 
%{_contrail_smgr_src}server_mgr_cobbler.py %{buildroot}%{_contrailopt}%{_cont cp %{_contrail_smgr_src}server_mgr_puppet.py %{buildroot}%{_contrailopt}%{_contrail_smgr} cp %{_contrail_smgr_src}server_mgr_exception.py %{buildroot}%{_contrailopt}%{_contrail_smgr} cp %{_contrail_smgr_src}server_mgr_logger.py %{buildroot}%{_contrailopt}%{_contrail_smgr} +cp %{_contrail_smgr_src}server_mgr_status.py %{buildroot}%{_contrailopt}%{_contrail_smgr} cp %{_contrail_smgr_src}smgr_dhcp_event.py %{buildroot}%{_contrailopt}%{_contrail_smgr} + cp %{_contrail_smgr_src}server_mgr_defaults.py %{buildroot}%{_contrailopt}%{_contrail_smgr} cp %{_contrail_smgr_src}utils/send_mail.py %{buildroot}%{_contrailopt}%{_contrail_smgr} -cp %{_contrail_smgr_src}smgr_config.ini %{buildroot}%{_contrailopt}%{_contrail_smgr} +cp %{_contrail_smgr_src}sm-config.ini %{buildroot}%{_contrailopt}%{_contrail_smgr} +cp %{_contrail_smgr_src}tags.ini %{buildroot}%{_contrailetc} cp %{_contrail_smgr_src}logger.conf %{buildroot}%{_contrailopt}%{_contrail_smgr} cp %{_contrail_smgr_src}%{_vmware}esxi_contrailvm.py %{buildroot}%{_contrailopt}%{_contrail_smgr} @@ -160,12 +163,12 @@ cp %{_contrail_smgr_src}%{_vmware}esxi_contrailvm.py %{buildroot}%{_contrailopt} cp %{_contrail_smgr_src}third_party/bottle.py %{buildroot}%{_contrailopt}%{_contrail_smgr} -cp %{_contrail_smgr_src}contrail_smgrd %{buildroot}%{_initdetc} +cp %{_contrail_smgr_src}contrail-server-manager %{buildroot}%{_initdetc} cp -r %{_contrail_smgr_src}/puppet %{buildroot}%{_contrailetc} cp -r %{_contrail_smgr_src}repos/contrail-centos-repo %{buildroot}%{_contrailetc} cp -r %{_contrail_smgr_src}cobbler %{buildroot}%{_contrailetc} cp -r %{_contrail_smgr_src}kickstarts %{buildroot}%{_contrailetc} -cp %{_contrail_smgr_src}contrail_smgrd.start %{buildroot}%{_sbinusr}contrail_smgrd +cp %{_contrail_smgr_src}contrail-server-manager.start %{buildroot}%{_sbinusr}contrail-server-manager cp %{_contrail_smgr_src}utils/sendmail.cf %{buildroot}%{_contrailetc} #install -p 
-m 755 %{_contrail_smgr_src}cobbler/dhcp.template %{buildroot}%{_bindir}%{_contrail_smgr} @@ -183,7 +186,7 @@ rm -rf %{buildroot} #%config(noreplace) %{_sysconfdir}/%{name}/%{name}.conf %{_contrailopt}/* /usr/sbin/* -/etc/init.d/contrail_smgrd +/etc/init.d/contrail-server-manager %{_contrailetc}/* #/etc/cobbler/dhcp.template #/etc/cobbler/dhcp.template diff --git a/src/contrail_smgrd.start b/src/contrail-server-manager.start similarity index 75% rename from src/contrail_smgrd.start rename to src/contrail-server-manager.start index a40299e1..6a55f3e4 100755 --- a/src/contrail_smgrd.start +++ b/src/contrail-server-manager.start @@ -17,8 +17,8 @@ cobbler sync service xinetd restart service ntpd restart -mkdir -p /var/run/contrail_smgrd/ -nohup /opt/contrail/server_manager/server_mgr_main.py -c /opt/contrail/server_manager/smgr_config.ini 0<&- &> /var/run/contrail_smgrd/contrail_smgrd.log& +mkdir -p /var/run/contrail-server-manager/ +nohup /opt/contrail/server_manager/server_mgr_main.py -c /opt/contrail/server_manager/sm-config.ini 0<&- &> /var/run/contrail-server-manager/contrail-server-manager.log& cd /var/www/html/thirdparty_packages dpkg-scanpackages . 
| gzip -9c > Packages.gz diff --git a/src/contrail_smgrd.u b/src/contrail-server-manager.u similarity index 95% rename from src/contrail_smgrd.u rename to src/contrail-server-manager.u index e3f65e44..c20f7c69 100755 --- a/src/contrail_smgrd.u +++ b/src/contrail-server-manager.u @@ -20,10 +20,10 @@ # PATH should only include /usr/* if it runs after the mountnfs.sh script PATH=/sbin:/usr/sbin:/bin:/usr/bin DESC="Contrail Server Manager" -NAME=contrail_smgrd +NAME=contrail-server-manager DAEMON=/usr/sbin/$NAME -DAEMON_ARGS="-c /opt/contrail/server_manager/smgr_config.ini" -PIDFILE=/var/run/contrail_smgrd/contrail_smgrd.pid +DAEMON_ARGS="-c /opt/contrail/server_manager/sm-config.ini" +PIDFILE=/var/run/contrail-server-manager/contrail-server-manager.pid SCRIPTNAME=/etc/init.d/$NAME # Exit if the package is not installed @@ -123,7 +123,7 @@ case "$1" in start-stop-daemon --status -p $PIDFILE RETVAL="$?" if [ "$RETVAL" = 0 ]; then - echo "contrail_smgrd is running!" + echo "contrail-server-manager is running!" 
fi ;; diff --git a/src/contrail/reimage b/src/contrail/reimage index 25dcb474..27f7c56a 100755 --- a/src/contrail/reimage +++ b/src/contrail/reimage @@ -8,7 +8,7 @@ import sys _DEF_CENTOS_STOCK_IMAGE="centos-6.4" _DEF_UBUNTU_STOCK_IMAGE="ubuntu-12.04.3" -_DEF_INI="./smgr_client_config.ini" +_DEF_INI="./sm-client-config.ini" def execute_cmd_out(cmd, log=LOG): log.debug("Executing command: %s" % cmd) @@ -89,7 +89,7 @@ def parse_args(args_str): ) defaults = { - 'conf_file': '/etc/contrail_smgr/smgr_client_config.ini', + 'conf_file': '/opt/contrail/server-manager/client//sm-client-config.ini', 'distro': 'centos', 'package_image_id': '', 'image_id':'', diff --git a/src/contrail/server_xmltoserver.py b/src/contrail/server_xmltoserver.py index 6b1b3c6b..392e374c 100755 --- a/src/contrail/server_xmltoserver.py +++ b/src/contrail/server_xmltoserver.py @@ -16,15 +16,13 @@ def convert_xml_to_json(input_file, output_file): smgr_server_dict['server'] = [] for server in servers_list: smgr_server = {} - smgr_server['server_id'] = server['hostname'] - smgr_server['mac'] = server['mac'] - smgr_server['ip'] = server['ipaddr'] + smgr_server['id'] = server['hostname'] + smgr_server['mac_address'] = server['mac'] + smgr_server['ip_address'] = server['ipaddr'] server_params = {} - server_params['ifname'] = 'eth1' - server_params['compute_non_mgmt_ip'] = '' - server_params['compute_non_mgmt_gway'] = '' - smgr_server['server_params'] = server_params - smgr_server['power_address'] = server['ipmi'] + server_params['interface_name'] = 'eth1' + smgr_server['parameters'] = server_params + smgr_server['ipmi_address'] = server['ipmi'] smgr_server_dict['server'].append(smgr_server) servers_jdump = json.dumps(smgr_server_dict, indent=4) diff --git a/src/debian-contrail-smgr-client/debian/rules b/src/debian-contrail-smgr-client/debian/rules index 2f227443..a2338cd4 100755 --- a/src/debian-contrail-smgr-client/debian/rules +++ b/src/debian-contrail-smgr-client/debian/rules @@ -37,7 +37,7 @@ 
override_dh_auto_install: cp -r $(src_path)/client/*.json $(WS)/$(build_root)/opt/contrail/server_manager/client/ cp -r $(src_path)/utils/create_smgr_db.py $(WS)/$(build_root)/opt/contrail/server_manager/client/ cp -r $(src_path)/client/server-manager $(WS)/$(build_root)/opt/contrail/server_manager/client/ - cp -r $(src_path)/client/smgr_client_config.ini $(WS)/$(build_root)/opt/contrail/server_manager/client/ + cp -r $(src_path)/client/sm-client-config.ini $(WS)/$(build_root)/opt/contrail/server_manager/client/ echo "auto_install" get-orig-source: diff --git a/src/debian-contrail-smgr/debian/postinst b/src/debian-contrail-smgr/debian/postinst index 8f860feb..b9284a71 100644 --- a/src/debian-contrail-smgr/debian/postinst +++ b/src/debian-contrail-smgr/debian/postinst @@ -5,21 +5,39 @@ set -e echo "running post install" -#cp -r /etc/contrail/cobbler /etc/ -cp -r /etc/contrail/puppet /etc/ - -cp /etc/contrail/cobbler/dhcp.template.u /etc/cobbler/dhcp.template +#cp -r /etc/contrail_smgr/cobbler /etc/ +cp -r /etc/contrail_smgr/puppet /etc/ +#mv /etc/cobbler/distro_signatures.json /etc/cobbler/distro_signatures.json-save +#mv /var/lib/cobbler/distro_signatures.json /var/lib/cobbler/distro_signatures.json-save +cp /etc/contrail_smgr/cobbler/distro_signatures.json-esxi55 /etc/cobbler/distro_signatures.json +cp /etc/contrail_smgr/cobbler/distro_signatures.json-esxi55 /var/lib/cobbler/distro_signatures.json + +# Copy cobbler boot.cfg template file for esxi5.5 +cp -f /etc/contrail_smgr/cobbler/bootcfg_esxi55.template /etc/cobbler/pxe + +# Copy cobbler pxesystem template file for esxi +#mv /etc/cobbler/pxe/pxesystem_esxi.template /etc/cobbler/pxe/pxesystem_esxi.template-save +cp /etc/contrail_smgr/cobbler/pxesystem_esxi.template /etc/cobbler/pxe +cp /etc/contrail_smgr/cobbler/dhcp.template.u /etc/cobbler/dhcp.template mkdir -p /var/www/html mkdir -p /var/www/html/kickstarts -cp -r /etc/contrail/kickstarts /var/www/html/ +cp -r /etc/contrail_smgr/kickstarts /var/www/html/ 
+mkdir -p /var/www/html/contrail +mkdir -p /etc/mail/ +cp /etc/contrail_smgr/sendmail.cf /etc/mail/ + +#cp /usr/bin/server_manager/dhcp.template /etc/cobbler/ +#cp -r /usr/bin/server_manager/kickstarts /var/www/html/ mkdir -p /var/www/html/contrail +mkdir -p /var/www/html/thirdparty_packages cp -u /etc/puppet/puppet_init_rd /var/www/cobbler/aux/puppet easy_install argparse easy_install paramiko easy_install pycrypto +easy_install ordereddict -mkdir -p /etc/contrail/images/ +mkdir -p /etc/contrail_smgr/images/ sed -i 's/puppet_auto_setup: 0/puppet_auto_setup: 1/g' /etc/cobbler/settings sed -i 's/sign_puppet_certs_automatically: 0/sign_puppet_certs_automatically: 1/g' /etc/cobbler/settings @@ -34,27 +52,29 @@ HOST_IP=`echo $HOST_IP_LIST | cut -d',' -f1` echo $HOST_IP sed -i "s/10.84.51.11/$HOST_IP/" /etc/cobbler/settings -update-rc.d contrail_smgrd defaults -#/sbin/chkconfig --add contrail_smgrd -sed -i "s/authn_denyall/authn_testing/g" /etc/cobbler/modules.conf -sed -i "s/authn_configfile/authn_testing/g" /etc/cobbler/modules.conf +update-rc.d contrail-server-manager defaults +#/sbin/chkconfig --add contrail-server-manager +sed -i "s/module = authn_.*/module = authn_configfile/g" /etc/cobbler/modules.conf -sed -i "s/127.0.0.1/$HOST_IP/g" /opt/contrail/server_manager/smgr_config.ini +sed -i "s/127.0.0.1/$HOST_IP/g" /opt/contrail/server_manager/sm-config.ini service apache2 restart service xinetd restart service cobbler restart - service puppetmaster restart +service postfix stop +service sendmail restart +#chkconfig apache2 on +#chkconfig puppetmaster on +#chkconfig contrail_smgrd on -#update-rc.d apache2 defaults -#update-rc.d xinetd defaults -#update-rc.d cobbler defaults -#update-rc.d puppetmaster defaults - #puppet master -service contrail_smgrd restart +update-rc.d apache2 defaults +update-rc.d xinetd defaults +update-rc.d cobbler defaults +update-rc.d puppetmaster defaults +service contrail-server-manager restart diff --git 
a/src/debian-contrail-smgr/debian/rules b/src/debian-contrail-smgr/debian/rules index 414ac913..861e2919 100755 --- a/src/debian-contrail-smgr/debian/rules +++ b/src/debian-contrail-smgr/debian/rules @@ -12,6 +12,8 @@ export LD_LIBRARY_PATH := $(LD_LIBRARY_PATH):debian/contrail-control/usr/lib64/c export BUILDTIME := $(shell date -u +%y%m%d%H%M) export src_path := ./../ export build_root := debian/contrail-smgr +python_version=$(shell python -c "import sys; print '%s.%s' % sys.version_info[0:2]") +python_dir=python$(python_version) BUILDTAG = ifdef TAG BUILDTAG = $(TAG) @@ -22,37 +24,43 @@ endif %: dh $@ -override_dh_auto_build: +override_dh_auto_build: echo "auto_build" -override_dh_auto_install: +override_dh_auto_install: # Cleanup directories rm -rf $(build_root)/opt/contrail/server_manager rm -rf $(build_root)/etc - + # Install directories install -d -m 755 $(build_root)/opt/contrail/server_manager/ install -d -m 754 $(build_root)/etc/init.d/ install -d -m 754 $(build_root)/etc/contrail_smgr/ install -d -m 754 $(build_root)/usr/sbin/ - + install -d -m 755 $(build_root)/etc/puppet/ + install -d -m 755 $(build_root)/etc/cobbler + # Copy/install files cp -r $(src_path)/server_mgr_main.py $(WS)/$(build_root)/opt/contrail/server_manager/ cp -r $(src_path)/server_mgr_db.py $(WS)/$(build_root)/opt/contrail/server_manager/ cp -r $(src_path)/server_mgr_cobbler.py $(WS)/$(build_root)/opt/contrail/server_manager/ cp -r $(src_path)/server_mgr_puppet.py $(WS)/$(build_root)/opt/contrail/server_manager/ + cp -r $(src_path)/server_mgr_exception.py $(WS)/$(build_root)/opt/contrail/server_manager/ + cp -r $(src_path)/server_mgr_logger.py $(WS)/$(build_root)/opt/contrail/server_manager/ cp -r $(src_path)/smgr_dhcp_event.py $(WS)/$(build_root)/opt/contrail/server_manager/ - cp -r $(src_path)/smgr_config.ini $(WS)/$(build_root)/opt/contrail/server_manager/ + cp -r $(src_path)/sm-config.ini $(WS)/$(build_root)/opt/contrail/server_manager/ + cp -r $(src_path)/tags.ini 
$(WS)/$(build_root)/etc/contrail_smgr/ cp -r $(src_path)/client $(WS)/$(build_root)/opt/contrail/server_manager/ - cp -r $(src_path)/contrail_smgrd.start $(WS)/$(build_root)/usr/sbin/contrail_smgrd + cp -r $(src_path)/contrail-server-manager.start $(WS)/$(build_root)/usr/sbin/contrail-server-manager cp -r $(src_path)/third_party/bottle.py $(WS)/$(build_root)/opt/contrail/server_manager/ - cp $(src_path)/contrail_smgrd.u $(WS)/$(build_root)/etc/init.d/contrail_smgrd -# cp $(src_path)/contrail_smgrd.sbin $(WS)/$(build_root)/usr/sbin/contrail_smgrd + cp $(src_path)/contrail-server-manager.u $(WS)/$(build_root)/etc/init.d/contrail-server-manager cp -r $(src_path)/puppet $(WS)/$(build_root)/etc/contrail_smgr/ + cp -r $(src_path)/repos/contrail-centos-repo $(WS)/$(build_root)/etc/contrail_smgr/ cp -r $(src_path)/cobbler $(WS)/$(build_root)/etc/contrail_smgr/ cp -r $(src_path)/kickstarts $(WS)/$(build_root)/etc/contrail_smgr/ - cp -r $(src_path)/client/smgr_client_config.ini $(WS)/$(build_root)/etc/contrail_smgr/ + cp -r $(src_path)/client/sm-client-config.ini $(WS)/$(build_root)/etc/contrail_smgr/ echo "auto_install" get-orig-source: uscan --verbose --rename --destdir=$(WS) + diff --git a/src/kickstarts/contrail-centos.ks b/src/kickstarts/contrail-centos.ks index 434f217e..7771399b 100644 --- a/src/kickstarts/contrail-centos.ks +++ b/src/kickstarts/contrail-centos.ks @@ -101,6 +101,8 @@ echo " pluginsync = true" >> /etc/puppet/puppet.conf echo " ignorecache = true" >> /etc/puppet/puppet.conf echo " usecacheonfailure = false" >> /etc/puppet/puppet.conf echo " listen = true" >> /etc/puppet/puppet.conf +echo "[main]" >> /etc/puppet/puppet.conf +echo "runinterval=180" >> /etc/puppet/puppet.conf cat >/tmp/puppet-auth.conf < true, require => [ Package['contrail-openstack-analytics'], Exec['analytics-venv'] ], - subscribe => [ File['/etc/contrail/collector.conf'], - File['/etc/contrail/query-engine.conf'], + subscribe => [ File['/etc/contrail/contrail-collector.conf'], + 
File['/etc/contrail/contrail-query-engine.conf'], File['/etc/contrail/contrail-analytics-api.conf'] ], ensure => running, } diff --git a/src/puppet/modules/contrail-config/contrail-api-centos.ini.erb b/src/puppet/modules/contrail-config/contrail-api-centos.ini.erb index 778b1a17..72be45f0 100644 --- a/src/puppet/modules/contrail-config/contrail-api-centos.ini.erb +++ b/src/puppet/modules/contrail-config/contrail-api-centos.ini.erb @@ -1,5 +1,5 @@ [program:contrail-api] -command=/bin/bash -c "source /opt/contrail/api-venv/bin/activate && exec python /opt/contrail/api-venv/lib/python2.7/site-packages/vnc_cfg_api_server/vnc_cfg_api_server.py --conf_file /etc/contrail/api_server.conf --listen_port <%= contrail_api_port_base %>%(process_num)01d --worker_id %(process_num)s" +command=/bin/bash -c "source /opt/contrail/api-venv/bin/activate && exec python /opt/contrail/api-venv/lib/python2.7/site-packages/vnc_cfg_api_server/vnc_cfg_api_server.py --conf_file /etc/contrail/contrail-api.conf --listen_port <%= contrail_api_port_base %>%(process_num)01d --worker_id %(process_num)s" numprocs=<%= contrail_api_nworkers %> process_name=%(process_num)s redirect_stderr=true diff --git a/src/puppet/modules/contrail-config/manifests/init.pp b/src/puppet/modules/contrail-config/manifests/init.pp index 92e933d4..79db7929 100644 --- a/src/puppet/modules/contrail-config/manifests/init.pp +++ b/src/puppet/modules/contrail-config/manifests/init.pp @@ -214,7 +214,7 @@ } # Ensure all config files with correct content are present. 
- config-template-scripts { ["api_server.conf", + config-template-scripts { ["contrail-api.conf", "schema_transformer.conf", "svc_monitor.conf", "discovery.conf", @@ -385,7 +385,7 @@ ############################### - File["/etc/contrail/ctrl-details"]->File["/etc/contrail/service.token"]->Config-template-scripts["api_server.conf"]->File["/etc/contrail/contrail_plugin.ini"]->Config-template-scripts["schema_transformer.conf"]->Config-template-scripts["svc_monitor.conf"]->Config-template-scripts["discovery.conf"]->Config-template-scripts["vnc_api_lib.ini"] + File["/etc/contrail/ctrl-details"]->File["/etc/contrail/service.token"]->Config-template-scripts["contrail-api.conf"]->File["/etc/contrail/contrail_plugin.ini"]->Config-template-scripts["schema_transformer.conf"]->Config-template-scripts["svc_monitor.conf"]->Config-template-scripts["discovery.conf"]->Config-template-scripts["vnc_api_lib.ini"] # Initialize the multi tenancy option will update latter based on vns argument if ($contrail_multi_tenancy == "True") { diff --git a/src/puppet/modules/contrail-config/templates/contrail-api-centos.ini.erb b/src/puppet/modules/contrail-config/templates/contrail-api-centos.ini.erb index 538f9132..b6ad3af6 100644 --- a/src/puppet/modules/contrail-config/templates/contrail-api-centos.ini.erb +++ b/src/puppet/modules/contrail-config/templates/contrail-api-centos.ini.erb @@ -1,5 +1,5 @@ [program:contrail-api] -command=/bin/bash -c "source /opt/contrail/api-venv/bin/activate && exec python /opt/contrail/api-venv/lib/python2.7/site-packages/vnc_cfg_api_server/vnc_cfg_api_server.py --conf_file /etc/contrail/api_server.conf --listen_port <%= contrail_api_port_base %>%(process_num)01d --worker_id %(process_num)s" +command=/bin/bash -c "source /opt/contrail/api-venv/bin/activate && exec python /opt/contrail/api-venv/lib/python2.7/site-packages/vnc_cfg_api_server/vnc_cfg_api_server.py --conf_file /etc/contrail/contrail-api.conf --listen_port <%= contrail_api_port_base %>%(process_num)01d 
--worker_id %(process_num)s" numprocs=<%= contrail_api_nworkers %> process_name=%(process_num)s redirect_stderr=true diff --git a/src/puppet/modules/contrail-config/templates/api_server.conf.erb b/src/puppet/modules/contrail-config/templates/contrail-api.conf.erb similarity index 100% rename from src/puppet/modules/contrail-config/templates/api_server.conf.erb rename to src/puppet/modules/contrail-config/templates/contrail-api.conf.erb diff --git a/src/puppet/modules/contrail-config/templates/contrail-api.ini.erb b/src/puppet/modules/contrail-config/templates/contrail-api.ini.erb index 09105848..1cc460c2 100644 --- a/src/puppet/modules/contrail-config/templates/contrail-api.ini.erb +++ b/src/puppet/modules/contrail-config/templates/contrail-api.ini.erb @@ -1,5 +1,5 @@ [program:contrail-api] -command=/usr/bin/contrail-api --conf_file /etc/contrail/api_server.conf --listen_port <%= contrail_api_port_base %>%(process_num)01d --worker_id %(process_num)s +command=/usr/bin/contrail-api --conf_file /etc/contrail/contrail-api.conf --listen_port <%= contrail_api_port_base %>%(process_num)01d --worker_id %(process_num)s numprocs=<%= contrail_api_nworkers %> process_name=%(process_num)s redirect_stderr=true diff --git a/src/puppet/modules/contrail-config/templates/discovery.conf.erb b/src/puppet/modules/contrail-config/templates/contrail-discovery.conf.erb similarity index 100% rename from src/puppet/modules/contrail-config/templates/discovery.conf.erb rename to src/puppet/modules/contrail-config/templates/contrail-discovery.conf.erb diff --git a/src/puppet/modules/contrail-control/manifests/init.pp b/src/puppet/modules/contrail-control/manifests/init.pp index 1e1af5e6..b2dc05b7 100644 --- a/src/puppet/modules/contrail-control/manifests/init.pp +++ b/src/puppet/modules/contrail-control/manifests/init.pp @@ -70,7 +70,7 @@ } # Ensure all config files with correct content are present. 
- control-template-scripts { ["dns.conf", "control-node.conf"]: } + control-template-scripts { ["dns.conf", "contrail-control.conf"]: } # Hard-coded to be taken as parameter of vnsi and multi-tenancy options need to be passed to contrail-control too. # The below script can be avoided. Sets up puppet agent and waits to get certificate from puppet master. @@ -89,7 +89,7 @@ logoutput => "true" } - Package["contrail-openstack-control"]->Exec['control-venv']->Control-template-scripts["control-node.conf"]->Control-template-scripts["dns.conf"]->Exec["control-server-setup"] + Package["contrail-openstack-control"]->Exec['control-venv']->Control-template-scripts["contrail-control.conf"]->Control-template-scripts["dns.conf"]->Exec["control-server-setup"] # Below is temporary to work-around in Ubuntu as Service resource fails # as upstart is not correctly linked to /etc/init.d/service-name @@ -115,7 +115,7 @@ enable => true, require => [ Package['contrail-openstack-control'], Exec['control-venv'] ], - subscribe => File['/etc/contrail/control-node.conf'], + subscribe => File['/etc/contrail/contrail-control.conf'], ensure => running, } if ($operatingsystem == "Ubuntu") { diff --git a/src/server_mgr_cobbler.py b/src/server_mgr_cobbler.py index 5e375c8c..9beaecc1 100755 --- a/src/server_mgr_cobbler.py +++ b/src/server_mgr_cobbler.py @@ -16,35 +16,6 @@ _DEF_BASE_DIR = '/etc/contrail/' _CONTRAIL_CENTOS_REPO = 'contrail-centos-repo' - -class cobTokenCheckThread(threading.Thread): - - ''' Class to run function that keeps validating the cobbler token - periodically (every 30 minutes) on a new thread. ''' - - def __init__(self, timer, server, token): - threading.Thread.__init__(self) - self._timer = timer - self._server = server - self._token = token - - def run(self): - _check_cobbler_token(self._timer, self._server, - self._token) - - -def _check_cobbler_token(timer, server, token): - ''' This function keeps checking and validating the cobbler token - periodically. 
It's called on a new thread and keep running - for ever. ''' - try: - while True: - time.sleep(timer) - server.token_check(token) - except: - print "Error in check cobbler token thread" - - class ServerMgrCobbler: _cobbler_ip = _DEF_COBBLER_IP @@ -107,21 +78,30 @@ def __init__(self, base_dir=_DEF_BASE_DIR, self._server.save_repo(rid, self._token) # Issue cobbler reposync for this repo cmd = "cobbler reposync --only=" + _CONTRAIL_CENTOS_REPO - subprocess.call(cmd, shell=True) - # Start a thread to keep cobbler token active. Comment out when - # needed for testing... - thread1 = cobTokenCheckThread( - self._COB_TOKEN_CHECK_TIMER, self._server, self._token) - # Make the thread as daemon - thread1.daemon = True - thread1.start() + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + msg = ("Cobbler Init: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + raise ServerMgrException(msg) except Exception as e: raise e # End of __init__ + # Function to check if cobbler token is valid or not, before calling any + # XMLRPC calls that need a valid token. If token is not valid, the function + # acquires a new token from cobbler. + def _validate_token(self, token, resource): + valid = self._server.check_access_no_fail(token, resource) + if not valid: + self._token = self._server.login( + self._cobbler_username, self._cobbler_password) + # end _validate_token + def create_distro(self, distro_name, image_type, path, kernel_file, initrd_file, cobbler_ip_address): try: + # Validate cobbler token + self._validate_token(self._token, "distro") # If distro already exists in cobbler, nothing to do. distro = self._server.find_distro({"name": distro_name}) if distro: @@ -184,6 +164,8 @@ def create_profile(self, profile_name, distro_name, image_type, ks_file, kernel_options, ks_meta): try: + # Validate cobbler token + self._validate_token(self._token, "profile") # If profile exists, nothing to do, jus return. 
profile = self._server.find_profile({"name": profile_name}) if profile: @@ -210,6 +192,8 @@ def create_profile(self, profile_name, def create_repo(self, repo_name, mirror): try: + # Validate cobbler token + self._validate_token(self._token, "repo") repo = self._server.find_repo({"name": repo_name}) if repo: rid = self._server.get_repo_handle( @@ -224,7 +208,11 @@ def create_repo(self, repo_name, mirror): self._server.save_repo(rid, self._token) # Issue cobbler reposync for this repo cmd = "cobbler reposync --only=" + repo_name - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + msg = ("create_repo: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + raise ServerMgrException(msg) except Exception as e: raise e # End of create_repo @@ -233,8 +221,10 @@ def create_system(self, system_name, profile_name, package_image_id, mac, ip, subnet, gway, system_domain, ifname, enc_passwd, server_license, esx_nicname, power_type, power_user, power_pass, power_address, - base_image, server_ip): + base_image, server_ip, partition=None): try: + # Validate cobbler token + self._validate_token(self._token, "system") system = self._server.find_system({"name": system_name}) if system: system_id = self._server.get_system_handle( @@ -255,7 +245,7 @@ def create_system(self, system_name, profile_name, package_image_id, system_id, "power_address", power_address, self._token) # For centos, create a sub-profile that has the repo for # package_image_id also made available for this system. 
- if ((base_image['image_type'] == "centos") and + if ((base_image['type'] == "centos") and (package_image_id)): sub_profile_name = profile_name + "-" + package_image_id sub_profile = self._server.find_profile( @@ -296,11 +286,15 @@ def create_system(self, system_name, profile_name, package_image_id, ks_metadata += ' ip_address=' + ip ks_metadata += ' system_name=' + system_name ks_metadata += ' system_domain=' + system_domain + if partition: + ks_metadata += ' partition=' + partition + else: + ks_metadata += ' partition=' + '/dev/sd?' if package_image_id: ks_metadata += ' contrail_repo_name=' + \ package_image_id - if ((base_image['image_type'] == 'esxi5.1') or - (base_image['image_type'] == 'esxi5.5')): + if ((base_image['type'] == 'esxi5.1') or + (base_image['type'] == 'esxi5.5')): ks_metadata += ' server_license=' + server_license ks_metadata += ' esx_nicname=' + esx_nicname @@ -322,7 +316,7 @@ def create_system(self, system_name, profile_name, package_image_id, self._server.modify_system(system_id, 'ksmeta', ks_metadata, self._token) - if (base_image['image_type'] == "ubuntu"): + if (base_image['type'] == "ubuntu"): kernel_options = 'system_name=' + system_name kernel_options += ' system_domain=' + system_domain kernel_options += ' ip_address=' + ip @@ -346,6 +340,8 @@ def create_system(self, system_name, profile_name, package_image_id, def enable_system_netboot(self, system_name): try: + # Validate cobbler token + self._validate_token(self._token, "system") system = self._server.find_system({"name": system_name}) if not system: raise Exception( @@ -364,6 +360,8 @@ def enable_system_netboot(self, system_name): def reboot_system(self, reboot_system_list): try: + # Validate cobbler token + self._validate_token(self._token, "system") power = { "power" : "reboot", "systems" : reboot_system_list } @@ -387,6 +385,8 @@ def reboot_system(self, reboot_system_list): def delete_distro(self, distro_name): try: + # Validate cobbler token + self._validate_token(self._token, 
"distro") self._server.remove_distro(distro_name, self._token) except Exception as e: pass @@ -394,6 +394,8 @@ def delete_distro(self, distro_name): def delete_repo(self, repo_name): try: + # Validate cobbler token + self._validate_token(self._token, "repo") self._server.remove_repo(repo_name, self._token) except Exception as e: pass @@ -401,6 +403,8 @@ def delete_repo(self, repo_name): def delete_profile(self, profile_name): try: + # Validate cobbler token + self._validate_token(self._token, "profile") self._server.remove_profile(profile_name, self._token) except Exception as e: pass @@ -408,6 +412,8 @@ def delete_profile(self, profile_name): def delete_system(self, system_name): try: + # Validate cobbler token + self._validate_token(self._token, "system") system = self._server.find_system({"name": system_name}) if system: self._server.remove_system(system_name, self._token) @@ -417,6 +423,8 @@ def delete_system(self, system_name): def sync(self): try: + # Validate cobbler token + self._validate_token(self._token, "system") self._server.sync(self._token) except Exception as e: raise e diff --git a/src/server_mgr_db.py b/src/server_mgr_db.py index 0d17afee..93bb3cae 100755 --- a/src/server_mgr_db.py +++ b/src/server_mgr_db.py @@ -9,61 +9,45 @@ from server_mgr_logger import ServerMgrlogger as ServerMgrlogger def_server_db_file = 'smgr_data.db' -pod_table = 'pod_table' -rack_table = 'rack_table' cluster_table = 'cluster_table' -vns_table = 'vns_table' -cloud_table = 'cloud_table' server_table = 'server_table' image_table = 'image_table' server_status_table = 'status_table' +server_tags_table = 'server_tags_table' _DUMMY_STR = "DUMMY_STR" class ServerMgrDb: - _pod_table_cols = [] - _rack_table_cols = [] _cluster_table_cols = [] - _vns_table_cols = [] - _cloud_table_cols = [] _server_table_cols = [] _image_table_cols = [] _status_table_cols = [] + _server_tags_table_cols = [] # Keep list of table columns def _get_table_columns(self): try: with self._con: cursor = 
self._con.cursor() - cursor.execute("SELECT * FROM " + - pod_table + " WHERE pod_id=?", (_DUMMY_STR,)) - self._pod_table_cols = [x[0] for x in cursor.description] - cursor.execute("SELECT * FROM " + - rack_table + " WHERE rack_id=?", (_DUMMY_STR,)) - self._rack_table_cols = [x[0] for x in cursor.description] cursor.execute( "SELECT * FROM " + - cluster_table + " WHERE cluster_id=?", (_DUMMY_STR,)) - self._cluster_table_cols = [x[0] for x in cursor.description] + server_table + " WHERE id=?", (_DUMMY_STR,)) + self._server_table_cols = [x[0] for x in cursor.description] cursor.execute( "SELECT * FROM " + - server_table + " WHERE server_id=?", (_DUMMY_STR,)) - self._server_table_cols = [x[0] for x in cursor.description] + server_tags_table + " WHERE tag_id=?", (_DUMMY_STR,)) + self._server_tags_table_cols = [x[0] for x in cursor.description] cursor.execute( "SELECT * FROM " + - image_table + " WHERE image_id=?", (_DUMMY_STR,)) + image_table + " WHERE id=?", (_DUMMY_STR,)) self._image_table_cols = [x[0] for x in cursor.description] cursor.execute("SELECT * FROM " + - vns_table + " WHERE vns_id=?", (_DUMMY_STR,)) - self._vns_table_cols = [x[0] for x in cursor.description] - cursor.execute( - "SELECT * FROM " + - cloud_table + " WHERE cloud_id=?", (_DUMMY_STR,)) - self._cloud_table_cols = [x[0] for x in cursor.description] + cluster_table + " WHERE id=?", (_DUMMY_STR,)) + self._cluster_table_cols = [x[0] for x in cursor.description] cursor.execute( "SELECT * FROM " + - server_status_table + " WHERE server_id=?", (_DUMMY_STR,)) + server_status_table + " WHERE id=?", (_DUMMY_STR,)) self._status_table_cols = [x[0] for x in cursor.description] except Exception as e: raise e @@ -75,60 +59,55 @@ def __init__(self, db_file_name=def_server_db_file): self._con = lite.connect(db_file_name) with self._con: cursor = self._con.cursor() - # Create pod table. 
- cursor.execute("CREATE TABLE IF NOT EXISTS " + pod_table + - """ (pod_id TEXT PRIMARY KEY, rack_id TEXT)""") - # Create rack table. - cursor.execute( - "CREATE TABLE IF NOT EXISTS " + rack_table + - """ (rack_id TEXT PRIMARY KEY, cluster_id TEXT)""") # Create cluster table. cursor.execute("CREATE TABLE IF NOT EXISTS " + cluster_table + - """ (cluster_id TEXT PRIMARY KEY)""") - # Create vns table. - cursor.execute("CREATE TABLE IF NOT EXISTS " + vns_table + - """ (vns_id TEXT PRIMARY KEY, - vns_params TEXT, + """ (id TEXT PRIMARY KEY, + parameters TEXT, email TEXT)""") - # Create cloud table. - cursor.execute("CREATE TABLE IF NOT EXISTS " + cloud_table + - """ (cloud_id TEXT PRIMARY KEY)""") # Create image table cursor.execute("CREATE TABLE IF NOT EXISTS " + - image_table + """ (image_id TEXT PRIMARY KEY, - image_version TEXT, image_type TEXT, - image_params TEXT)""") + image_table + """ (id TEXT PRIMARY KEY, + version TEXT, type TEXT, path TEXT, + parameters TEXT)""") # Create status table cursor.execute("CREATE TABLE IF NOT EXISTS " + - server_status_table + """ (server_id TEXT PRIMARY KEY, + server_status_table + """ (id TEXT PRIMARY KEY, server_status TEXT)""") # Create server table cursor.execute( "CREATE TABLE IF NOT EXISTS " + server_table + - """ (mac TEXT PRIMARY KEY NOT NULL, - server_id TEXT, static_ip varchar default 'N', - ip TEXT, mask TEXT, gway TEXT, domain TEXT, - pod_id TEXT, rack_id TEXT, cluster_id TEXT, - vns_id TEXT, cloud_id TEXT, base_image_id TEXT, - package_image_id TEXT, passwd TEXT, - update_time TEXT, disc_flag varchar default 'N', - server_params TEXT, roles TEXT, power_user TEXT, - power_pass TEXT, power_address TEXT, - power_type TEXT, intf_control TEXT, + """ (mac_address TEXT PRIMARY KEY NOT NULL, + id TEXT, host_name TEXT, static_ip varchar default 'N', + ip_address TEXT, subnet_mask TEXT, gateway TEXT, domain TEXT, + cluster_id TEXT, base_image_id TEXT, + package_image_id TEXT, password TEXT, + last_update TEXT, discovered 
varchar default 'false', + parameters TEXT, roles TEXT, ipmi_username TEXT, + ipmi_password TEXT, ipmi_address TEXT, + ipmi_type TEXT, intf_control TEXT, intf_data TEXT, intf_bond TEXT, - email TEXT, - UNIQUE (server_id))""") + email TEXT, status TEXT, + tag1 TEXT, tag2 TEXT, tag3 TEXT, + tag4 TEXT, tag5 TEXT, tag6 TEXT, tag7 TEXT, + UNIQUE (id))""") + # Create server tags table + cursor.execute( + "CREATE TABLE IF NOT EXISTS " + server_tags_table + + """ (tag_id TEXT PRIMARY KEY NOT NULL, + value TEXT, + UNIQUE (tag_id), + UNIQUE (value))""") self._get_table_columns() self._smgr_log.log(self._smgr_log.DEBUG, "Created tables") - # During init, we check if any of the VNS in DB are missing any Storage Parameters (Generated UUIDs) - vns_list = self._get_items(vns_table, None, + # During init, we check if any of the Cluster in DB are missing any Storage Parameters (Generated UUIDs) + cluster_list = self._get_items(cluster_table, None, None, True, None) - for vns in vns_list: - # Check if storage parameters are present in VNS, else generate them - if 'storage_fsid' not in set(eval(vns['vns_params'])) or 'storage_virsh_uuid' not in set(eval( - vns['vns_params'])): - self.update_vns_uuids(vns) + for cluster in cluster_list: + # Check if storage parameters are present in Cluster, else generate them + if 'storage_fsid' not in set(eval(cluster['parameters'])) or 'storage_virsh_uuid' not in set(eval( + cluster['parameters'])): + self.update_cluster_uuids(cluster) except e: raise e # End of __init__ @@ -138,14 +117,11 @@ def delete_tables(self): with self._con: cursor = self._con.cursor() cursor.executescript(""" - .DELETE FROM """ + pod_table + """; - .DELETE FROM """ + rack_table + """; - .DELETE FROM """ + cluster_table + """; - .DELETE FROM """ + vns_table + """; - .DELETE FROM """ + cloud_table + """; - .DELETE FROM """ + server_table + """; - .DELETE FROM """ + server_status_table + """; - .DELETE FROM """ + image_table + ";") + DELETE FROM """ + cluster_table + """; + 
DELETE FROM """ + server_table + """; + DELETE FROM """ + server_tags_table + """; + DELETE FROM """ + server_status_table + """; + DELETE FROM """ + image_table + ";") except: raise e # End of delete_tables @@ -156,8 +132,8 @@ def get_server_id(self, server_mac): server_mac = str(EUI(server_mac)).replace("-", ":") with self._con: cursor = self._con.cursor() - cursor.execute("SELECT server_id FROM " + - server_table + " WHERE mac=?", + cursor.execute("SELECT id FROM " + + server_table + " WHERE mac_address=?", (server_mac,)) row = cursor.fetchone() if row: @@ -168,13 +144,31 @@ def get_server_id(self, server_mac): return None # end get_server_id - def get_server_mac(self, server_id): + # Below function returns value corresponding to tag_id from + # server_tags_table + def get_server_tag(self, tag_id): try: with self._con: cursor = self._con.cursor() - cursor.execute("SELECT mac FROM " + - server_table + " WHERE server_id=?", - (server_id,)) + cursor.execute("SELECT value FROM " + + server_tags_table + " WHERE tag_id=?", + (tag_id,)) + row = cursor.fetchone() + if row: + return row[0] + else: + return None + except: + return None + # end get_server_tag + + def get_server_mac(self, id): + try: + with self._con: + cursor = self._con.cursor() + cursor.execute("SELECT mac_address FROM " + + server_table + " WHERE id=?", + (id,)) row = cursor.fetchone() if row: return row[0] @@ -197,10 +191,26 @@ def _add_row(self, table_name, dict): raise e # end _add_row - def _delete_row(self, table_name, match_key, match_value): + # Generic function to delete rows matching given criteria + # from given table. + # Match dict is dictionary of columns and values to match for. + # unmatch dict is not of dictionaty of columns and values to match for. 
+ def _delete_row(self, table_name, + match_dict=None, unmatch_dict=None): try: - delete_str = "DELETE FROM %s WHERE %s='%s'" \ - % (table_name, match_key, match_value) + delete_str = "DELETE FROM %s" %(table_name) + # form a string to provide to where match clause + match_list = [] + if match_dict: + match_list = ["%s = \'%s\'" %( + k,v) for k,v in match_dict.iteritems()] + if unmatch_dict: + match_list += ["%s != \'%s\'" %( + k,v) for k,v in unmatch_dict.iteritems()] + if match_list: + match_str = " and ".join(match_list) + delete_str+= " WHERE " + match_str + # end if match_list with self._con: cursor = self._con.cursor() cursor.execute(delete_str) @@ -208,34 +218,55 @@ def _delete_row(self, table_name, match_key, match_value): raise e # End _delete_row - def _modify_row(self, table_name, dict, match_key, match_value): + def _modify_row(self, table_name, dict, + match_dict=None, unmatch_dict=None): try: keys, values = zip(*dict.items()) modify_str = "UPDATE %s SET " % (table_name) update_list = ",".join(key + "=?" for key in keys) modify_str += update_list - modify_str += " WHERE %s=?" % (match_key) - values = values + (match_value,) + match_list = [] + if match_dict: + match_list = ["%s = ?" %( + k) for k in match_dict.iterkeys()] + match_values = [v for v in match_dict.itervalues()] + if unmatch_dict: + match_list += ["%s != ?" 
%( + k) for k in unmatch_dict.iterkeys()] + match_values += [v for v in unmatch_dict.itervalues()] + if match_list: + match_str = " and ".join(match_list) + match_values_str = ",".join(match_values) + modify_str += " WHERE " + match_str + values += (match_values_str,) with self._con: cursor = self._con.cursor() cursor.execute(modify_str, values) except Exception as e: raise e - def _get_items(self, table_name, match_key=None, - match_value=None, detail=False, primary_key=None): + def _get_items( + self, table_name, match_dict=None, + unmatch_dict=None, detail=False, always_fields=None): try: with self._con: cursor = self._con.cursor() if detail: sel_cols = "*" else: - sel_cols = primary_key - if ((not match_key) or (not match_value)): - select_str = "SELECT %s FROM %s" % (sel_cols, table_name) - else: - select_str = "SELECT %s FROM %s WHERE %s=\'%s\'" \ - % (sel_cols, table_name, match_key, match_value) + sel_cols = ",".join(always_fields) + select_str = "SELECT %s FROM %s" % (sel_cols, table_name) + # form a string to provide to where match clause + match_list = [] + if match_dict: + match_list = ["%s = \'%s\'" %( + k,v) for k,v in match_dict.iteritems()] + if unmatch_dict: + match_list += ["%s != \'%s\'" %( + k,v) for k,v in unmatch_dict.iteritems()] + if match_list: + match_str = " and ".join(match_list) + select_str+= " WHERE " + match_str cursor.execute(select_str) rows = [x for x in cursor] cols = [x[0] for x in cursor.description] @@ -252,102 +283,103 @@ def _get_items(self, table_name, match_key=None, def add_cluster(self, cluster_data): try: - self._add_row(cluster_table, cluster_data) - except Exception as e: - raise e - # End of add_cluster - - def add_vns(self, vns_data): - try: - # Store vns_params dictionary as a text field - vns_params = vns_data.pop("vns_params", None) - if vns_params is not None: - vns_data['vns_params'] = str(vns_params) + # Store cluster_parameters dictionary as a text field + cluster_parameters = cluster_data.pop("parameters", 
None) + if cluster_parameters is not None: + cluster_data['parameters'] = str(cluster_parameters) # Store email list as text field - email = vns_data.pop("email", None) + email = cluster_data.pop("email", None) if email is not None: - vns_data['email'] = str(email) - self._add_row(vns_table, vns_data) + cluster_data['email'] = str(email) + self._add_row(cluster_table, cluster_data) except Exception as e: raise e - # End of add_vns + # End of add_cluster def add_server(self, server_data): try: - if 'mac' in server_data: - server_data['mac'] = str( - EUI(server_data['mac'])).replace("-", ":") + if 'mac_address' in server_data: + server_data['mac_address'] = str( + EUI(server_data['mac_address'])).replace("-", ":") # Store roles list as a text field roles = server_data.pop("roles", None) - vns_id = server_data.get('vns_id', None) - if vns_id: - self.check_obj("vns", "vns_id", vns_id) + cluster_id = server_data.get('cluster_id', None) + if cluster_id: + self.check_obj( + "cluster", {"id" : cluster_id}) if roles is not None: server_data['roles'] = str(roles) - intf_control = server_data.pop("control", None) + intf_control = server_data.pop("control_data_network", None) if intf_control: server_data['intf_control'] = str(intf_control) - intf_data = server_data.pop("data", None) - if intf_data: - server_data['intf_data'] = str(intf_data) - intf_bond = server_data.pop("bond", None) + intf_bond = server_data.pop("bond_interface", None) if intf_bond: server_data['intf_bond'] = str(intf_bond) # Store email list as text field email = server_data.pop("email", None) if email: server_data['email'] = str(email) - - + # store tags if any + server_tags = server_data.pop("tag", None) + if server_tags is not None: + tags_dict = self.get_server_tags(detail=True) + rev_tags_dict = dict((v,k) for k,v in tags_dict.iteritems()) + for k,v in server_tags.iteritems(): + server_data[rev_tags_dict[k]] = v # Store server_params dictionary as a text field - server_params = 
server_data.pop("server_params", None) - if server_params is not None: - server_data['server_params'] = str(server_params) + server_parameters = server_data.pop("parameters", None) + if server_parameters is not None: + server_data['parameters'] = str(server_parameters) self._add_row(server_table, server_data) - # Create an entry for cluster, pod, rack etc if needed. - pod_id = server_data.get('pod_id', None) - if pod_id: - pod_data = {"pod_id": pod_id} - self._add_row(pod_table, pod_data) - rack_id = server_data.get('rack_id', None) - if rack_id: - rack_data = {"rack_id": rack_id} - self._add_row(rack_table, rack_data) - cluster_id = server_data.get('cluster_id', None) - if cluster_id: - cluster_data = {"cluster_id": cluster_id} - self._add_row(cluster_table, cluster_data) - if vns_id: - vns_data = {"vns_id": vns_id} - self._add_row(vns_table, vns_data) - cloud_id = server_data.get('cloud_id', None) - if cloud_id: - cloud_data = {"cloud_id": cloud_id} - self._add_row(cloud_table, cloud_data) except Exception as e: raise e return 0 # End of add_server + # This function for adding server tag is slightly different + # compared with add function for other tables. The tag_data + # contains tag information for all tags. + # This function is always called with complete list of tags + # so, clear the table first. 
+ def add_server_tags(self, tag_data): + try: + with self._con: + cursor = self._con.cursor() + cursor.executescript(""" + DELETE FROM """ + server_tags_table + ";") + for key,value in tag_data.iteritems(): + row_data = { + 'tag_id' : key, + 'value' : value } + self._add_row(server_tags_table, row_data) + except Exception as e: + raise e + # End of add_server_tags + def server_discovery(self, action, entity): try: - if 'mac' in entity: - entity['mac'] = str(EUI(entity['mac'])).replace("-", ":") - mac = entity.get("mac", None) + if 'mac_address' in entity: + entity['mac_address'] = str(EUI(entity['mac_address'])).replace("-", ":") + mac_address = entity.get("mac_address", None) if action.lower() == "add": # If this server is already present in our table, # update IP address if DHCP was not static. - servers = self._get_items(server_table, "mac", mac, True) + servers = self._get_items( + server_table, {"mac_address" : mac_address},detail=True) if servers: server = servers[0] - self._modify_row(server_table, entity, "mac", mac) + self._modify_row( + server_table, entity, + {"mac_address": mac_address}, {}) return - entity['disc_flag'] = "Y" + entity['discovered'] = "true" + entity['status'] = "server_discovered" self._add_row(server_table, entity) elif action.lower() == "delete": - servers = self.get_server("mac", mac, True) - if ((servers) and (servers[0]['disc_flag'] == "Y")): - self._delete_row(server_table, "mac", mac) + servers = self.get_server({"mac_address" : mac_address}, detail=True) + if ((servers) and (servers[0]['discovered'] == "true")): + self._delete_row(server_table, + {"mac_address" : mac_address}) else: return except: @@ -356,203 +388,205 @@ def server_discovery(self, action, entity): def add_image(self, image_data): try: - # Store image_params dictionary as a text field - image_params = image_data.pop("image_params", None) - if image_params is not None: - image_data['image_params'] = str(image_params) + # Store image_parameters dictionary as a text 
field + image_parameters = image_data.pop("parameters", None) + if image_parameters is not None: + image_data['parameters'] = str(image_parameters) self._add_row(image_table, image_data) except Exception as e: raise e # End of add_image - def delete_cluster(self, cluster_id): - try: - self.check_obj("cluster", "cluster_id", cluster_id) - servers = self.get_server('cluster_id', cluster_id, True) - for server in servers: - server_data = {} - server_data['cluster_id'] = '' - self._modify_row(server_table, server_data, "server_id", server['server_id']) - self._delete_row(cluster_table, "cluster_id", cluster_id) - except Exception as e: - raise e - # End of delete_cluster - - def delete_vns(self, vns_id, force=False): + def delete_cluster(self, match_dict=None, unmatch_dict=None): try: - self.check_obj("vns", "vns_id", vns_id) - servers = self.get_server('vns_id', vns_id, True) + self.check_obj("cluster", match_dict, unmatch_dict) + cluster_id = match_dict.get("id", None) + servers = None + if cluster_id: + servers = self.get_server({'cluster_id' : cluster_id}, detail=True) if servers: - if force: - for server in servers: - server_data = {} - server_data['vns_id'] = '' - self._modify_row(server_table, server_data, \ - "server_id", server['server_id']) - else: - msg = ("Servers are present in this vns, " - "remove vns association, prior to vns delete.") - raise ServerMgrException(msg) - self._delete_row(vns_table, "vns_id", vns_id) + msg = ("Servers are present in this cluster, " + "remove cluster association, prior to cluster delete.") + raise ServerMgrException(msg) + self._delete_row(cluster_table, match_dict, unmatch_dict) except Exception as e: raise e - # End of delete_vns + # End of delete_cluster - def check_obj(self, type, match_key, match_value, raise_exception=True): + def check_obj(self, type, + match_dict=None, unmatch_dict=None, raise_exception=True): if type == "server": cb = self.get_server - db_obj = cb(match_key, match_value, detail=False) - elif type 
== "vns": - cb = self.get_vns - db_obj = cb(match_value, detail=False) + db_obj = cb(match_dict, unmatch_dict, detail=False) elif type == "cluster": cb = self.get_cluster - db_obj = cb(match_value, detail=False) + db_obj = cb(match_dict, unmatch_dict, detail=False) elif type == "image": cb = self.get_image - db_obj = cb(match_key, match_value, detail=False) + db_obj = cb(match_dict, unmatch_dict, detail=False) if not db_obj: - msg = "%s %s not found" % (type, match_value) + msg = "%s not found" % (type) if raise_exception: raise ServerMgrException(msg) return False return True - #end of check_obj + #end of check_obj - def delete_server(self, match_key, match_value): + def delete_server(self, match_dict=None, unmatch_dict=None): try: - if (match_key.lower() == "mac"): - if match_value: - match_value = str(EUI(match_value)).replace("-", ":") - self.check_obj("server", match_key, match_value) - self._delete_row(server_table, match_key, match_value) + if match_dict and match_dict.get("mac_address", None): + if match_dict["mac_address"]: + match_dict["mac_address"] = str( + EUI(match_dict["mac_address"])).replace("-", ":") + if unmatch_dict and unmatch_dict.get("mac_address", None): + if unmatch_dict["mac_address"]: + unmatch_dict["mac_address"] = str( + EUI(unmatch_dict["mac_address"])).replace("-", ":") + self.check_obj("server", match_dict, unmatch_dict) + self._delete_row(server_table, + match_dict, unmatch_dict) except Exception as e: raise e # End of delete_server - def delete_image(self, image_id): + def delete_server_tag(self, match_dict=None, unmatch_dict=None): try: - self._delete_row(image_table, "image_id", image_id) + self._delete_row(server_tags_table, match_dict, unmatch_dict) + except Exception as e: + raise e + # End of delete_server_tag + + def delete_image(self, match_dict=None, unmatch_dict=None): + try: + self.check_obj("image", match_dict, unmatch_dict) + self._delete_row(image_table, match_dict, unmatch_dict) except Exception as e: raise e # End 
of delete_image - def modify_vns(self, vns_data): + def modify_cluster(self, cluster_data): try: - vns_id = vns_data.get('vns_id', None) - if not vns_id: - raise Exception("No vns id specified") - self.check_obj("vns", "vns_id", vns_id) - db_vns = self.get_vns(vns_id, detail=True) - if not db_vns: - msg = "%s is not valid" % vns_id + cluster_id = cluster_data.get('id', None) + if not cluster_id: + raise Exception("No cluster id specified") + self.check_obj("cluster", {"id" : cluster_id}) + db_cluster = self.get_cluster( + {"id" : cluster_id}, detail=True) + if not db_cluster: + msg = "%s is not valid" % cluster_id raise ServerMgrException(msg) - db_vns_params_str = db_vns[0] ['vns_params'] - db_vns_params = {} - if db_vns_params_str: - db_vns_params = eval(db_vns_params_str) - if 'uuid' not in db_vns_params: + db_cluster_params_str = db_cluster[0] ['parameters'] + db_cluster_params = {} + if db_cluster_params_str: + db_cluster_params = eval(db_cluster_params_str) + if 'uuid' not in db_cluster_params: str_uuid = str(uuid.uuid4()) - vns_data["vns_params"].update({"uuid":str_uuid}) - # Store vns_params dictionary as a text field - vns_params = vns_data.pop("vns_params", {}) - for k,v in vns_params.iteritems(): + cluster_data["parameters"].update({"uuid":str_uuid}) + # Store cluster_params dictionary as a text field + cluster_params = cluster_data.pop("parameters", {}) + for k,v in cluster_params.iteritems(): if v == '""': v = '' - db_vns_params[k] = v - vns_params = db_vns_params - if vns_params is not None: - vns_data['vns_params'] = str(vns_params) + db_cluster_params[k] = v + cluster_params = db_cluster_params + if cluster_params is not None: + cluster_data['parameters'] = str(cluster_params) # Store email list as text field - email = vns_data.pop("email", None) + email = cluster_data.pop("email", None) if email is not None: - vns_data['email'] = str(email) - self._modify_row(vns_table, vns_data, - 'vns_id', vns_id) + cluster_data['email'] = str(email) + 
self._modify_row( + cluster_table, cluster_data, + {'id' : cluster_id}, {}) except Exception as e: raise e - # End of modify_vns + # End of modify_cluster def modify_image(self, image_data): try: - image_id = image_data.get('image_id', None) + image_id = image_data.get('id', None) if not image_id: raise Exception("No image id specified") #Reject if non mutable field changes - db_image = self.get_image('image_id', image_data['image_id'], - detail=True) - #if image_data['image_path'] != db_image[0]['image_path']: - # raise ServerMgrException('Image path cannnot be modified') - #TODO image path can be added in the db - image_data.pop("image_path", None) - if image_data['image_type'] != db_image[0]['image_type']: + db_image = self.get_image( + {'id' : image_data['id']}, + detail=True) + if image_data['path'] != db_image[0]['path']: + raise ServerMgrException('Image path cannnot be modified') + if image_data['type'] != db_image[0]['type']: raise ServerMgrException('Image type cannnot be modified') # Store image_params dictionary as a text field - image_params = image_data.pop("image_params", None) - if image_params is not None: - image_data['image_params'] = str(image_params) - self._modify_row(image_table, image_data, - 'image_id', image_id) + image_parameters = image_data.pop("parameters", None) + if image_parameters is not None: + image_data['parameters'] = str(image_parameters) + self._modify_row( + image_table, image_data, + {'id' : image_id}, {}) except Exception as e: raise e # End of modify_image def modify_server(self, server_data): db_server = None - if 'server_id' in server_data.keys(): - db_server = self.get_server('server_id', server_data['server_id'], - detail=True) - elif 'mac' in server_data.keys(): - db_server = self.get_server('mac', server_data['mac'], - detail=True) + if 'id' in server_data.keys(): + db_server = self.get_server( + {'id': server_data['id']}, + detail=True) + elif 'mac_address' in server_data.keys(): + db_server = self.get_server( + 
{'mac_address' : server_data['mac_address']}, + detail=True) try: - vns_id = server_data.get('vns_id', None) - if vns_id: - self.check_obj("vns", "vns_id", vns_id) - - if 'mac' in server_data: - server_data['mac'] = str( - EUI(server_data['mac'])).replace("-", ":") - server_mac = server_data.get('mac', None) + cluster_id = server_data.get('cluster_id', None) + if cluster_id: + self.check_obj("cluster", {"id" : cluster_id}) + + if 'mac_address' in server_data: + server_data['mac_address'] = str( + EUI(server_data['mac_address'])).replace("-", ":") + server_mac = server_data.get('mac_address', None) if not server_mac: - server_id = server_data.get('server_id', None) + server_id = server_data.get('id', None) if not server_id: raise Exception("No server MAC or id specified") else: server_mac = self.get_server_mac(server_id) #Check if object exists - if 'server_id' in server_data.keys() and \ + if 'id' in server_data.keys() and \ 'server_mac' in server_data.keys(): - self.check_obj('server', 'server_id', - server_data['server_id']) + self.check_obj('server', + {'id' : server_data['id']}) #Reject if primary key values change - if server_data['mac'] != db_server[0]['mac']: + if server_data['mac_address'] != db_server[0]['mac_address']: raise ServerMgrException('MAC address cannnot be modified') # Store roles list as a text field roles = server_data.pop("roles", None) if roles is not None: server_data['roles'] = str(roles) - intf_control = server_data.pop("control", None) + intf_control = server_data.pop("control_data_network", None) if intf_control: server_data['intf_control'] = str(intf_control) - intf_data = server_data.pop("data", None) - if intf_data: - server_data['intf_data'] = str(intf_data) - intf_bond = server_data.pop("bond", None) + intf_bond = server_data.pop("bond_interface", None) if intf_bond: server_data['intf_bond'] = str(intf_bond) - + # store tags if any + server_tags = server_data.pop("tag", None) + if server_tags is not None: + tags_dict = 
self.get_server_tags(detail=True) + rev_tags_dict = dict((v,k) for k,v in tags_dict.iteritems()) + for k,v in server_tags.iteritems(): + server_data[rev_tags_dict[k]] = v # Store server_params dictionary as a text field - server_params = server_data.pop("server_params", None) + server_params = server_data.pop("parameters", None) #if server_params is not None: # server_data['server_params'] = str(server_params) #check for modify in db server_params #Always Update DB server parmas db_server_params = {} - db_server_params_str = db_server[0] ['server_params'] + db_server_params_str = db_server[0] ['parameters'] if db_server_params_str: db_server_params = eval(db_server_params_str) if server_params: @@ -560,55 +594,66 @@ def modify_server(self, server_data): if v == '""': v = '' db_server_params[k] = v - server_data['server_params'] = str(db_server_params) + server_data['parameters'] = str(db_server_params) # Store email list as text field email = server_data.pop("email", None) if email is not None: server_data['email'] = str(email) - self._modify_row(server_table, server_data, - 'mac', server_mac) - # Create an entry for cluster, pod, rack etc if needed. 
- pod_id = server_data.get('pod_id', None) - if pod_id: - pod_data = {"pod_id": pod_id} - self._add_row(pod_table, pod_data) - rack_id = server_data.get('rack_id', None) - if rack_id: - rack_data = {"rack_id": rack_id} - self._add_row(rack_table, rack_data) - cluster_id = server_data.get('cluster_id', None) - if cluster_id: - cluster_data = {"cluster_id": cluster_id} - self._add_row(cluster_table, cluster_data) - if vns_id: - vns_data = {"vns_id": vns_id} - self._add_row(vns_table, vns_data) - cloud_id = server_data.get('cloud_id', None) - if cloud_id: - cloud_data = {"cloud_id": cloud_id} - self._add_row(cloud_table, cloud_data) + self._modify_row( + server_table, server_data, + {'mac_address' : server_mac}, {}) except Exception as e: raise e # End of modify_server - def get_image(self, match_key=None, match_value=None, + # This function for modifying server tag is slightly different + # compared with modify function for other tables. The tag_data + # contains tag information for all tags. 
+ def modify_server_tags(self, tag_data): + try: + for key,value in tag_data.iteritems(): + row_data = { + 'tag_id' : key, + 'value' : value } + self._modify_row( + server_tags_table, row_data, + {'tag_id' : key}, {}) + except Exception as e: + raise e + # End of modify_server_tags + + def get_image(self, match_dict=None, unmatch_dict=None, detail=False): try: - images = self._get_items(image_table, match_key, - match_value, detail, "image_id") + images = self._get_items( + image_table, match_dict, + unmatch_dict, detail, ["id"]) except Exception as e: raise e return images # End of get_image - + def get_server_tags(self, match_dict=None, unmatch_dict=None, + detail=True): + try: + tag_dict = {} + tags = self._get_items( + server_tags_table, match_dict, + unmatch_dict, True, ["tag_id"]) + for tag in tags: + tag_dict[tag['tag_id']] = tag['value'] + except Exception as e: + raise e + return tag_dict + # End of get_server_tags def get_status(self, match_key=None, match_value=None, detail=False): try: - status = self._get_items(server_status_table, match_key, - match_value, detail, "server_id") + status = self._get_items( + server_status_table, {match_key : match_value}, + detail=detail, always_field=["id"]) except Exception as e: raise e return status @@ -616,14 +661,16 @@ def get_status(self, match_key=None, match_value=None, def put_status(self, server_data): try: - server_id = server_data.get('server_id', None) + server_id = server_data.get('id', None) if not server_id: raise Exception("No server id specified") # Store vns_params dictionary as a text field - servers = self._get_items(server_status_table, "server_id", server_id, True) + servers = self._get_items( + server_status_table, {"id" : server_id},detail=True) if servers: - self._modify_row(server_status_table, server_data, - 'server_id', server_id) + self._modify_row( + server_status_table, server_data, + {'id' : server_id}, {}) else: self._add_row(server_status_table, server_data) except Exception as e: @@ 
-631,62 +678,63 @@ def put_status(self, server_data): # End of put_status - def get_server(self, match_key=None, match_value=None, - detail=False): + def get_server(self, match_dict=None, unmatch_dict=None, + detail=False, field_list=None): try: - if ((match_key) and (match_key.lower() == "mac")): - if match_value: - match_value = str(EUI(match_value)).replace("-", ":") - servers = self._get_items(server_table, match_key, - match_value, detail, "server_id") + if match_dict and match_dict.get("mac_address", None): + if match_dict["mac_address"]: + match_dict["mac_address"] = str( + EUI(match_dict["mac_address"])).replace("-", ":") + # For server table, when detail is false, return server_id, mac + # and ip. + if field_list: + servers = self._get_items( + server_table, match_dict, + unmatch_dict, detail, field_list) + else: + servers = self._get_items( + server_table, match_dict, + unmatch_dict, detail, ["id", "mac_address", "ip_address"]) except Exception as e: raise e return servers # End of get_server - def get_cluster(self, cluster_id=None, - detail=False): + def get_cluster(self, match_dict=None, + unmatch_dict=None, detail=False): try: - clusters = self._get_items(cluster_table, "cluster_id", - cluster_id, detail, "cluster_id") + cluster = self._get_items( + cluster_table, match_dict, + unmatch_dict, detail, ["id"]) except Exception as e: raise e - return clusters + return cluster # End of get_cluster - def get_vns(self, vns_id=None, - detail=False): - try: - vns = self._get_items(vns_table, "vns_id", - vns_id, detail, "vns_id") - except Exception as e: - raise e - return vns - # End of get_vns - - # If any UUIDs are missing from an existing VNS, we add them during ServerManager DB init - def update_vns_uuids(self, vns): + # If any UUIDs are missing from an existing Cluster, we add them during ServerManager DB init + def update_cluster_uuids(self, cluster): try: - db_vns_params_str = vns['vns_params'] - db_vns_params = {} - if db_vns_params_str: - db_vns_params 
= eval(db_vns_params_str) - if 'uuid' not in db_vns_params: + db_cluster_params_str = cluster['parameters'] + db_cluster_params = {} + if db_cluster_params_str: + db_cluster_params = eval(db_cluster_params_str) + if 'uuid' not in db_cluster_params: str_uuid = str(uuid.uuid4()) - db_vns_params.update({"uuid": str_uuid}) - if 'storage_fsid' not in db_vns_params: + db_cluster_params.update({"uuid": str_uuid}) + if 'storage_fsid' not in db_cluster_params: storage_fsid = str(uuid.uuid4()) - db_vns_params.update({"storage_fsid": storage_fsid}) - if 'storage_virsh_uuid' not in db_vns_params: + db_cluster_params.update({"storage_fsid": storage_fsid}) + if 'storage_virsh_uuid' not in db_cluster_params: storage_virsh_uuid = str(uuid.uuid4()) - db_vns_params.update({"storage_virsh_uuid": storage_virsh_uuid}) + db_cluster_params.update({"storage_virsh_uuid": storage_virsh_uuid}) except Exception as e: raise e - vns['vns_params'] = str(db_vns_params) - self._modify_row(vns_table, vns, - 'vns_id', vns['vns_id']) - # End of update_vns_uuids + cluster['parameters'] = str(db_cluster_params) + self._modify_row( + cluster_table, cluster, + {'id' : cluster['id']}, {}) + # End of update_cluster_uuids # End class ServerMgrDb diff --git a/src/server_mgr_defaults.py b/src/server_mgr_defaults.py index 520a7112..e9fc1be8 100644 --- a/src/server_mgr_defaults.py +++ b/src/server_mgr_defaults.py @@ -2,77 +2,76 @@ #validation DS server_fields = { - "match_keys": "['server_id', 'mac', 'cluster_id', 'rack_id', 'pod_id', 'vns_id', 'ip']", + "match_keys": "['id', 'mac_address', 'cluster_id', 'ip_address', 'tag']", "obj_name": "server", - "primary_keys": "['server_id', 'mac']", - "server_id": "", - "mac": "", - "ip": "", - "server_params": """{ - 'compute_non_mgmt_ip': '', - 'compute_non_mgmt_gway': '' + "primary_keys": "['id', 'mac_address']", + "id": "", + "host_name": "", + "mac_address": "", + "ip_address": "", + "parameters": """{ + 'interface_name': '', + 'partition': '', }""", "roles": 
["config","openstack","control","compute","collector","webui","database"], "cluster_id": "", - "vns_id": "", - "mask": "", - "gway": "", - "passwd": "", + "subnet_mask": "", + "gateway": "", + "password": "", "domain": "", "email": "", - "power_user": "", - "power_type": "", - "power_pass": "", - "control": "", - "bond": "", - "power_address": "" + "ipmi_username": "", + "ipmi_type": "", + "ipmi_password": "", + "control_data_network": "", + "bond_interface": "", + "ipmi_address": "", + "tag": "" } -vns_fields = { - "match_keys": "['vns_id']", - "obj_name": "vns", - "vns_id": "", +cluster_fields = { + "match_keys": "['id']", + "obj_name": "cluster", + "id": "", "email": "", - "primary_keys": "['vns_id']", - "vns_params": """{ + "primary_keys": "['id']", + "parameters": """{ 'router_asn': '64512', 'database_dir': '/home/cassandra', - 'db_initial_token': '', + 'database_token': '', 'openstack_mgmt_ip': '', - 'use_certs': 'False', + 'use_certificates': 'False', 'multi_tenancy': 'False', - 'encap_priority': 'MPLSoUDP,MPLSoGRE,VXLAN', + 'encapsulation_priority': 'MPLSoUDP,MPLSoGRE,VXLAN', 'service_token': 'contrail123', - 'ks_user': 'admin', - 'ks_passwd': 'contrail123', - 'ks_tenant': 'admin', + 'keystone_username': 'admin', + 'keystone_password': 'contrail123', + 'keystone_tenant': 'admin', 'openstack_passwd': 'contrail123', 'analytics_data_ttl': '168', - 'compute_non_mgmt_ip': '', - 'compute_non_mgmt_gway': '', 'haproxy': 'disable', - 'mask': '255.255.255.0', - 'gway': '10.204.221.46', - 'passwd': 'c0ntrail123', - 'ext_bgp': '', + 'subnet_mask': '255.255.255.0', + 'gateway': '10.204.221.46', + 'password': 'c0ntrail123', + 'external_bgp': '', + 'internal_vip': '', + 'external_vip': '', + 'contrail_internal_vip': '', + 'contrail_external_vip': '', + 'nfs_glance_path': '', + 'nfs_server': '', 'domain': 'contrail.juniper.net' }""" } -cluster_fields = { - "match_keys": "['cluster_id']", - "obj_name": "cluster", - "primary_keys": "['cluster_id']", - "cluster_id": "" -} - 
image_fields = { - "match_keys": "['image_id']", + "match_keys": "['id']", "obj_name": "image", - "primary_keys": "['image_id']", - "image_id": "", - "image_type": "", - "image_version": "", - "image_path": "" + "primary_keys": "['id']", + "id": "", + "type": "", + "version": "", + "path": "" } +email_events = ["reimage_started", "reimage_completed", "provision_started", "provision_completed"] diff --git a/src/server_mgr_logger.py b/src/server_mgr_logger.py index 137e0be5..0ca9bde1 100755 --- a/src/server_mgr_logger.py +++ b/src/server_mgr_logger.py @@ -72,28 +72,26 @@ def __new__(cls): # __new__ always a classmethod class ServerMgrTransactionlogger: GET_SMGR_CFG_ALL = "GET_SMGR_ALL" GET_SMGR_CFG_CLUSTER = "GET_SMGR_CLUSTER" - GET_SMGR_CFG_VNS = "GET_SMGR_VNS" GET_SMGR_CFG_SERVER = "GET_SMGR_SERVER" GET_SMGR_CFG_IMAGE = "GET_SMGR_IMAGE" GET_SMGR_CFG_STATUS = "GET_SMGR_STATUS" + GET_SMGR_CFG_TAG = "GET_SMGR_TAG" PUT_SMGR_CFG_ALL = "PUT_SMGR_ALL" PUT_SMGR_CFG_CLUSTER = "PUT_SMGR_CLUSTER" - PUT_SMGR_CFG_VNS = "PUT_SMGR_VNS" PUT_SMGR_CFG_SERVER = "PUT_SMGR_SERVER" PUT_SMGR_CFG_IMAGE = "PUT_SMGR_IMAGE" PUT_SMGR_CFG_STATUS = "PUT_SMGR_STATUS" + PUT_SMGR_CFG_TAG = "PUT_SMGR_TAG" DELETE_SMGR_CFG_ALL = "DELETE_SMGR_ALL" DELETE_SMGR_CFG_CLUSTER = "DELETE_SMGR_CLUSTER" - DELETE_SMGR_CFG_VNS = "DELETE_SMGR_VNS" DELETE_SMGR_CFG_SERVER = "DELETE_SMGR_SERVER" DELETE_SMGR_CFG_IMAGE = "DELETE_SMGR_IMAGE" DELETE_SMGR_CFG_STATUS = "DELETE_SMGR_STATUS" MODIFY_SMGR_CFG_ALL = "MODIFY_SMGR_ALL" MODIFY_SMGR_CFG_CLUSTER = "MODIFY_SMGR_CLUSTER" - MODIFY_SMGR_CFG_VNS = "MODIFY_SMGR_VNS" MODIFY_SMGR_CFG_SERVER = "MODIFY_SMGR_SERVER" MODIFY_SMGR_CFG_IMAGE = "MODIFY_SMGR_IMAGE" MODIFY_SMGR_CFG_STATUS = "MODIFY_SMGR_STATUS" @@ -127,39 +125,36 @@ def log(self, data, transaction_type, success=True): elif transaction_type == self.GET_SMGR_CFG_CLUSTER: msg = "ACTION %s: %s %s" % \ (self.GET_SMGR_CFG_CLUSTER, data.query_string, success) - elif transaction_type == self.GET_SMGR_CFG_VNS: - msg = 
"ACTION %s: %s %s" % \ - (self.GET_SMGR_CFG_VNS, data.query_string, success) elif transaction_type == self.GET_SMGR_CFG_SERVER: msg = "ACTION %s: %s %s" % \ (self.GET_SMGR_CFG_SERVER, data.query_string, success) elif transaction_type == self.GET_SMGR_CFG_IMAGE: msg = "ACTION %s: %s %s" % \ (self.GET_SMGR_CFG_IMAGE, data.query_string, success) + elif transaction_type == self.GET_SMGR_CFG_TAG: + msg = "ACTION %s: %s %s" % \ + (self.GET_SMGR_CFG_TAG, data.query_string, success) elif transaction_type == self.PUT_SMGR_CFG_ALL: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) elif transaction_type == self.PUT_SMGR_CFG_CLUSTER: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) - elif transaction_type == self.PUT_SMGR_CFG_VNS: - msg = "ACTION %s: %s %s" % \ - (transaction_type, data.query_string, success) elif transaction_type == self.PUT_SMGR_CFG_SERVER: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) elif transaction_type == self.PUT_SMGR_CFG_IMAGE: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) + elif transaction_type == self.PUT_SMGR_CFG_TAG: + msg = "ACTION %s: %s %s" % \ + (transaction_type, data.query_string, success) elif transaction_type == self.DELETE_SMGR_CFG_ALL: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) elif transaction_type == self.DELETE_SMGR_CFG_CLUSTER: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) - elif transaction_type == self.DELETE_SMGR_CFG_VNS: - msg = "ACTION %s: %s %s" % \ - (transaction_type, data.query_string, success) elif transaction_type == self.DELETE_SMGR_CFG_SERVER: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) @@ -172,9 +167,6 @@ def log(self, data, transaction_type, success=True): elif transaction_type == self.MODIFY_SMGR_CFG_CLUSTER: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) - elif transaction_type == 
self.MODIFY_SMGR_CFG_VNS: - msg = "ACTION %s: %s %s" % \ - (transaction_type, data.query_string, success) elif transaction_type == self.MODIFY_SMGR_CFG_SERVER: msg = "ACTION %s: %s %s" % \ (transaction_type, data.query_string, success) diff --git a/src/server_mgr_main.py b/src/server_mgr_main.py index a5d0fdb5..d14479cd 100755 --- a/src/server_mgr_main.py +++ b/src/server_mgr_main.py @@ -31,7 +31,9 @@ import ast import uuid import traceback +import platform from server_mgr_defaults import * +from server_mgr_status import * from server_mgr_db import ServerMgrDb as db from server_mgr_cobbler import ServerMgrCobbler as ServerMgrCobbler from server_mgr_puppet import ServerMgrPuppet as ServerMgrPuppet @@ -45,17 +47,18 @@ _WEB_HOST = '127.0.0.1' _WEB_PORT = 9001 -_DEF_CFG_DB = 'vns_server_mgr.db' +_DEF_CFG_DB = 'cluster_server_mgr.db' _DEF_SMGR_BASE_DIR = '/etc/contrail_smgr/' -_DEF_SMGR_CFG_FILE = _DEF_SMGR_BASE_DIR + 'smgr_config.ini' +_DEF_SMGR_CFG_FILE = _DEF_SMGR_BASE_DIR + 'sm-config.ini' +_SERVER_TAGS_FILE = _DEF_SMGR_BASE_DIR + 'tags.ini' _DEF_HTML_ROOT_DIR = '/var/www/html/' _DEF_COBBLER_IP = '127.0.0.1' _DEF_COBBLER_PORT = None -_DEF_COBBLER_USER = 'cobbler' -_DEF_COBBLER_PASSWD = 'cobbler' -_DEF_POWER_USER = 'ADMIN' -_DEF_POWER_PASSWD = 'ADMIN' -_DEF_POWER_TOOL = 'ipmilan' +_DEF_COBBLER_USERNAME = 'cobbler' +_DEF_COBBLER_PASSWORD = 'cobbler' +_DEF_IPMI_USERNAME = 'ADMIN' +_DEF_IPMI_PASSWORD = 'ADMIN' +_DEF_IPMI_TYPE = 'ipmilan' _DEF_PUPPET_DIR = '/etc/puppet/' @bottle.error(403) @@ -98,6 +101,10 @@ class VncServerManager(): ''' _smgr_log = None _smgr_trans_log = None + _tags_list = ['tag1', 'tag2', 'tag3', 'tag4', + 'tag5', 'tag6', 'tag7'] + _tags_dict = {} + _rev_tags_dict = {} #fileds here except match_keys, obj_name and primary_key should #match with the db columns @@ -126,31 +133,74 @@ def __init__(self, args_str=None): args_str = sys.argv[1:] self._parse_args(args_str) + # Reads the tags.ini file to get tags mapping (if it exists) + if 
os.path.isfile(_SERVER_TAGS_FILE): + tags_config = ConfigParser.SafeConfigParser() + tags_config.read(_SERVER_TAGS_FILE) + tags_config_dict = dict(tags_config.items("TAGS")) + for key, value in tags_config_dict.iteritems(): + if key not in self._tags_list: + self._smgr_log.log( + self._smgr_log.DEBUG, + "Invalid tag %s in tags ini file" + %(key)) + exit() + if value: + self._tags_dict[key] = value + self._rev_tags_dict[value] = key + # end if os.path.isfile() + # Connect to the cluster-servers database try: self._serverDb = db( - self._args.smgr_base_dir+self._args.db_name) + self._args.server_manager_base_dir+self._args.database_name) except: self._smgr_log.log(self._smgr_log.DEBUG, "Error Connecting to Server Database %s" - % (self._args.smgr_base_dir+self._args.db_name)) + % (self._args.server_manager_base_dir+self._args.database_name)) + exit() + + # Add server tags to the DB + try: + self._serverDb.add_server_tags(self._tags_dict) + except: + self._smgr_log.log( + self._smgr_log.ERROR, + "Error adding server tags to server manager DB") exit() # Create an instance of cobbler interface class and connect to it. try: - self._smgr_cobbler = ServerMgrCobbler(self._args.smgr_base_dir, - self._args.cobbler_ip, + self._smgr_cobbler = ServerMgrCobbler(self._args.server_manager_base_dir, + self._args.cobbler_ip_address, self._args.cobbler_port, - self._args.cobbler_user, - self._args.cobbler_passwd) + self._args.cobbler_username, + self._args.cobbler_password) except: print "Error connecting to cobbler" exit() + try: + # needed for testing... 
+ status_thread_config = {} + status_thread_config['listen_ip'] = self._args.listen_ip_addr + status_thread_config['listen_port'] = '9002' + + status_thread = ServerMgrStatusThread( + None, "Status-Thread", status_thread_config) + # Make the thread as daemon + status_thread.daemon = True + status_thread.start() + except: + self._smgr_log.log(self._smgr_log.DEBUG, + "Error Connecting to Server Database %s" + % (self._args.server_manager_base_dir+self._args.database_name)) + exit() + # Create an instance of puppet interface class. try: - # TBD - Puppet params to be added. - self._smgr_puppet = ServerMgrPuppet(self._args.smgr_base_dir, + # TBD - Puppet parameters to be added. + self._smgr_puppet = ServerMgrPuppet(self._args.server_manager_base_dir, self._args.puppet_dir) except: self._smgr_log.log(self._smgr_log.DEBUG, "Error creating instance of puppet class") @@ -196,34 +246,29 @@ def __init__(self, args_str=None): # REST calls for GET methods (Get Info about existing records) bottle.route('/all', 'GET', self.get_server_mgr_config) bottle.route('/cluster', 'GET', self.get_cluster) - bottle.route('/vns', 'GET', self.get_vns) bottle.route('/server', 'GET', self.get_server) bottle.route('/image', 'GET', self.get_image) bottle.route('/status', 'GET', self.get_status) + bottle.route('/server_status', 'GET', self.get_server_status) + bottle.route('/tag', 'GET', self.get_server_tags) # REST calls for PUT methods (Create New Records) bottle.route('/all', 'PUT', self.create_server_mgr_config) bottle.route('/image/upload', 'PUT', self.upload_image) bottle.route('/status', 'PUT', self.put_status) - #smgr_add - bottle.route('/cluster', 'PUT', self.put_cluster) bottle.route('/server', 'PUT', self.put_server) bottle.route('/image', 'PUT', self.put_image) - bottle.route('/vns', 'PUT', self.put_vns) + bottle.route('/cluster', 'PUT', self.put_cluster) + bottle.route('/tag', 'PUT', self.put_server_tags) # REST calls for DELETE methods (Remove records) bottle.route('/cluster', 
'DELETE', self.delete_cluster) - bottle.route('/vns', 'DELETE', self.delete_vns) bottle.route('/server', 'DELETE', self.delete_server) bottle.route('/image', 'DELETE', self.delete_image) # REST calls for POST methods - bottle.route('/cluster', 'POST', self.modify_cluster) - bottle.route('/vns', 'POST', self.modify_vns) - bottle.route('/server', 'POST', self.modify_server) - bottle.route('/image', 'POST', self.modify_image) bottle.route('/server/reimage', 'POST', self.reimage_server) bottle.route('/server/provision', 'POST', self.provision_server) bottle.route('/server/restart', 'POST', self.restart_server) @@ -243,7 +288,7 @@ def get_server_port(self): # end get_server_port # REST API call to get sever manager config - configuration of all - # clusters, VNSs & all servers is returned. + # clusters & all servers is returned. def get_server_mgr_config(self): self._smgr_log.log(self._smgr_log.DEBUG, "get_server_mgr_config") config = {} @@ -253,9 +298,10 @@ def get_server_mgr_config(self): # Check if request arguments has detail parameter detail = ("detail" in query_args) config['cluster'] = self._serverDb.get_cluster(detail=detail) - config['vns'] = self._serverDb.get_vns(detail=detail) config['server'] = self._serverDb.get_server(detail=detail) config['image'] = self._serverDb.get_image(detail=detail) + # always call get_server_tags with detail=True + config['tag'] = self._serverDb.get_server_tags(detail=True) except Exception as e: self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.GET_SMGR_ALL, False) @@ -267,7 +313,7 @@ def get_server_mgr_config(self): # end get_server_mgr_config # REST API call to get sever manager config - configuration of all - # clusters, with all servers and roles is returned. This call + # CLUSTERs, with all servers and roles is returned. This call # provides all the configuration as in get_server_mgr_config() call # above. This call additionally provides a way of getting all the # configuration for a particular cluster. 
@@ -279,57 +325,54 @@ def get_cluster(self): if ret_data["status"] == 0: match_key = ret_data["match_key"] match_value = ret_data["match_value"] + match_dict = {} + if match_key: + match_dict[match_key] = match_value detail = ret_data["detail"] - - entity = self._serverDb.get_cluster(match_value, detail) + entity = self._serverDb.get_cluster( + match_dict, detail=detail) except ServerMgrException as e: - abort(404, e.value) self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.GET_SMGR_CFG_CLUSTER, False) + self._smgr_trans_log.GET_SMGR_CFG_CLUSTER, + False) + abort(404, e.value) except Exception as e: self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.GET_SMGR_CFG_CLUSTER, False) + self._smgr_trans_log.GET_SMGR_CFG_CLUSTER, + False) self.log_trace() abort(404, repr(e)) - + self._smgr_trans_log.log(bottle.request, + self._smgr_trans_log.GET_SMGR_CFG_CLUSTER, + False) self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.GET_SMGR_CFG_CLUSTER) + for x in entity: + if x.get("parameters", None) is not None: + x['parameters'] = eval(x['parameters']) return {"cluster": entity} # end get_cluster - # REST API call to get sever manager config - configuration of all - # VNSs, with all servers and roles is returned. This call - # provides all the configuration as in get_server_mgr_config() call - # above. This call additionally provides a way of getting all the - # configuration for a particular vns. - def get_vns(self): - self._smgr_log.log(self._smgr_log.DEBUG, "get_vns") + # REST API call to get list of server tags. The tags are read from + # .ini file and stored in DB. There is also a copy maintained in a + # dictionary. Since all these are synced up, we return info from + # dictionaty variable itself. 
+ def get_server_tags(self): + self._smgr_log.log(self._smgr_log.DEBUG, "get_server_tags") try: - ret_data = self.validate_smgr_request("VNS", "GET", - bottle.request) - if ret_data["status"] == 0: - match_key = ret_data["match_key"] - match_value = ret_data["match_value"] - detail = ret_data["detail"] - entity = self._serverDb.get_vns(match_value, detail) - except ServerMgrException as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.GET_SMGR_CFG_VNS, - False) - abort(404, e.value) + query_args = parse_qs(urlparse(bottle.request.url).query, + keep_blank_values=True) + tag_dict = self._tags_dict.copy() except Exception as e: self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.GET_SMGR_CFG_VNS, + self._smgr_trans_log.GET_SMGR_CFG_TAG, False) self.log_trace() abort(404, repr(e)) - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.GET_SMGR_CFG_VNS, - False) self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.GET_SMGR_CFG_VNS) - return {"vns": entity} - # end get_vns + self._smgr_trans_log.GET_SMGR_CFG_TAG) + return tag_dict + # end get_server_tags def validate_smgr_entity(self, type, entity): obj_list = entity.get(type, None) @@ -359,6 +402,9 @@ def validate_smgr_get(self, validation_data, request, data=None): match_key, match_value = query_args.popitem() match_keys_str = validation_data['match_keys'] match_keys = eval(match_keys_str) + # Append "discovered" as one of the values, though + # its not part of server table fields. + match_keys.append("discovered") if (match_key not in match_keys): raise ServerMgrException("Match Key not present") if match_value == None or match_value[0] == '': @@ -410,24 +456,24 @@ def validate_smgr_put(self, validation_data, request, data=None, for data_item_key, data_item_value in data.iteritems(): #If json data name is not present in list of #allowable fields silently ignore them. 
- if data_item_key == obj_name + "_params" and modify == False: - object_params = data_item_value - default_object_params = eval(validation_data[obj_name +'_params']) - for key,value in default_object_params.iteritems(): - if key not in object_params: + if data_item_key == "parameters" and modify == False: + object_parameters = data_item_value + default_object_parameters = eval(validation_data['parameters']) + for key,value in default_object_parameters.iteritems(): + if key not in object_parameters: msg = "Default Object param added is %s:%s" % \ (key, value) self._smgr_log.log(self._smgr_log.INFO, msg) - object_params[key] = value + object_parameters[key] = value """ - for k,v in object_params.iteritems(): - if k in default_object_params and v == '' + for k,v in object_parameters.iteritems(): + if k in default_object_parameters and v == '' if v == '""': - object_params[k] = '' + object_parameters[k] = '' """ - data[data_item_key] = object_params + data[data_item_key] = object_parameters elif data_item_key not in validation_data: # data.pop(data_item_key, None) remove_list.append(data_item_key) @@ -464,11 +510,11 @@ def validate_smgr_put(self, validation_data, request, data=None, raise ServerMgrException(msg) """ if 'roles' in data: - if 'storage' in data['roles'] and 'compute' not in data['roles']: - msg = "role 'storage' needs role 'compute' in provision file" + if 'storage-compute' in data['roles'] and 'compute' not in data['roles']: + msg = "role 'storage-compute' needs role 'compute' in provision file" raise ServerMgrException(msg) - elif 'storage-mgr' in data['roles'] and 'openstack' not in data['roles']: - msg = "role 'storage-mgr' needs role 'openstack' in provision file" + elif 'storage-master' in data['roles'] and 'openstack' not in data['roles']: + msg = "role 'storage-master' needs role 'openstack' in provision file" raise ServerMgrException(msg) return ret_data @@ -526,45 +572,37 @@ def validate_smgr_modify(self, validation_data, request, data = None): 
#TODO Handle replace return ret_data - def _validate_roles(self, match_key, match_value): - if match_key == 'server_id': - server = self._serverDb.get_server( - "server_id", match_value, detail=True) - if server and server[0]: - vns_id = server [0] ['vns_id'] - if vns_id is None: - msg = ("No VNS associated with server %s") % (match_value) - raise ServerMgrException(msg) - else: - msg = ("No server present for %s") % (match_value) - raise ServerMgrException(msg) - elif match_key == 'vns_id': - vns_id = match_value - + def _validate_roles(self, cluster_id): + # get list of all servers in this cluster servers = self._serverDb.get_server( - 'vns_id', vns_id, detail=True) + {'cluster_id': cluster_id}, detail=True) role_list = [ "database", "openstack", "config", "control", "collector", "webui", "compute" ] roles_set = set(role_list) - optional_role_list = ["storage", "storage-mgr"] + optional_role_list = ["storage-compute", "storage-master"] optional_role_set = set(optional_role_list) - vns_role_list = [] + cluster_role_list = [] for server in servers: - vns_role_list.extend(eval(server['roles'])) + duplicate_roles = self.list_duplicates(eval(server['roles'])) + if len(duplicate_roles): + msg = "Duplicate Roles '%s' present" % \ + ", ".join(str(e) for e in duplicate_roles) + raise ServerMgrException(msg) + cluster_role_list.extend(eval(server['roles'])) - vns_unique_roles = set(vns_role_list) + cluster_unique_roles = set(cluster_role_list) - missing_roles = roles_set.difference(vns_unique_roles) + missing_roles = roles_set.difference(cluster_unique_roles) if len(missing_roles): msg = "Mandatory roles \"%s\" are not present" % \ ", ".join(str(e) for e in missing_roles) self._smgr_log.log(self._smgr_log.DEBUG, msg) raise ServerMgrException(msg) - unknown_roles = vns_unique_roles.difference(roles_set) + unknown_roles = cluster_unique_roles.difference(roles_set) unknown_roles.difference_update(optional_role_set) if len(unknown_roles): @@ -575,6 +613,15 @@ def 
_validate_roles(self, match_key, match_value): return 0 + def list_duplicates(self, seq): + seen = set() + seen_add = seen.add + # adds all elements it doesn't know yet to seen and all other to + # seen_twice + seen_twice = set( x for x in seq if x in seen or seen_add(x) ) + # turn the set into a list (as requested) + return list( seen_twice ) + def validate_smgr_provision(self, validation_data, request , data=None): ret_data = {} @@ -585,14 +632,14 @@ def validate_smgr_provision(self, validation_data, request , data=None): if package_image_id is None: msg = "No contrail package specified for provisioning" raise ServerMgrException(msg) - req_provision_params = entity.pop("provision_params", None) + req_provision_params = entity.pop("provision_parameters", None) # if req_provision_params are specified, check contents for # validity, store the info in DB and proceed with the # provisioning step. if req_provision_params is not None: role_list = [ "database", "openstack", "config", - "control", "collector", "webui", "compute", "zookeeper", "storage", "storage-mgr"] + "control", "collector", "webui", "compute", "zookeeper", "storage-compute", "storage-master"] roles = req_provision_params.get("roles", None) if roles is None: msg = "No provisioning roles specified" @@ -617,24 +664,24 @@ def validate_smgr_provision(self, validation_data, request , data=None): prov_servers[server].append(key) # end for server # end for key - vns_id = None + cluster_id = None servers = [] for key in prov_servers: server = self._serverDb.get_server( - "server_id", key, detail=True) + {"id" : key}, detail=True) if server: server = server[0] servers.append(server) - if ((vns_id != None) and - (server['vns_id'] != vns_id)): - msg = "all servers must belong to same vns" + if ((cluster_id != None) and + (server['cluster_id'] != cluster_id)): + msg = "all servers must belong to same cluster" raise ServerMgrException(msg) - vns_id = server['vns_id'] + cluster_id = server['cluster_id'] # end for 
#Modify the roles for key, value in prov_servers.iteritems(): new_server = { - 'server_id' : key, + 'id' : key, 'roles' : value } self._serverDb.modify_server(new_server) # end for @@ -643,6 +690,7 @@ def validate_smgr_provision(self, validation_data, request , data=None): raise ServerMgrException(msg) ret_data["status"] = 0 ret_data["servers"] = servers + ret_data["package_image_id"] = package_image_id else: if (len(entity) == 0): msg = "No servers specified" @@ -651,22 +699,29 @@ def validate_smgr_provision(self, validation_data, request , data=None): match_key, match_value = entity.popitem() # check that match key is a valid one if (match_key not in ( - "server_id", "mac", "cluster_id", - "rack_id", "pod_id", "vns_id")): + "id", "mac_address", "cluster_id", "tag")): msg = "Invalid Query arguments" raise ServerMgrException(msg) else: msg = "No servers specified" raise ServerMgrException(msg) # end else + match_dict = {} + if match_key == "tag": + match_dict = self._process_server_tags(match_value) + elif match_key: + match_dict[match_key] = match_value servers = self._serverDb.get_server( - match_key, match_value, detail=True) + match_dict, detail=True) if len(servers) == 0: msg = "No servers found for %s" % \ (match_value) raise ServerMgrException(msg) - - self._validate_roles(match_key, match_value) + cluster_id = servers[0]['cluster_id'] + if not cluster_id: + msg = ("No Clusterassociated with server %s") % (match_value) + raise ServerMgrException(msg) + self._validate_roles(cluster_id) ret_data["status"] = 0 ret_data["servers"] = servers ret_data["package_image_id"] = package_image_id @@ -691,8 +746,8 @@ def validate_smgr_reboot(self, validation_data, request , data=None): elif len(entity) == 1: match_key, match_value = entity.popitem() # check that match key is a valid one - if (match_key not in ("server_id", "mac", "cluster_id", - "rack_id", "pod_id", "vns_id")): + if (match_key not in ("id", "mac_address", + "tag", "cluster_id")): msg = "Invalid Query 
arguments" raise ServerMgrException(msg) else: @@ -731,8 +786,8 @@ def validate_smgr_reimage(self, validation_data, request , data=None): elif len(entity) == 1: match_key, match_value = entity.popitem() # check that match key is a valid one - if (match_key not in ("server_id", "mac", "cluster_id", - "rack_id", "pod_id", "vns_id")): + if (match_key not in ("id", "mac_address", + "tag","cluster_id")): msg = "Invalid Query arguments" raise ServerMgrException(msg) else: @@ -753,18 +808,13 @@ def validate_smgr_request(self, type, oper, request, data = None, modify = False): ret_data = {} ret_data['status'] = 1 - - ret_data = {} - ret_data['status'] = 1 if type == "SERVER": validation_data = server_fields - elif type == "VNS": - validation_data = vns_fields elif type == "CLUSTER": validation_data = cluster_fields elif type == "IMAGE": validation_data = image_fields - else: + else: validation_data = None if oper == "GET": @@ -782,6 +832,67 @@ def validate_smgr_request(self, type, oper, request, data = None, modify = elif oper == "REIMAGE": return self.validate_smgr_reimage(validation_data, request, data) + # This function converts the string of tags received in REST call and make + # a dictionary of tag keys that can be passed to match servers from DB. + # The match_value (tags received are in form tag1=value,tag2=value etc. + # This function maps the tag name to tag number and value and makes + # a dictionary of those. + def _process_server_tags(self, match_value): + if not match_value: + return {} + match_dict = {} + tag_list = match_value.split(',') + for x in tag_list: + tag = x.strip().split('=') + if tag[0] in self._rev_tags_dict: + match_dict[self._rev_tags_dict[tag[0]]] = tag[1] + else: + msg = ("Unknown tag %s specified" %( + tag[0])) + self._smgr_log.log( + self._smgr_log.INFO, msg) + raise ServerMgrException(msg) + # end else + return match_dict + # end _process_server_tags + + # This call returns status information about a provided server. 
If no server + # if provided, information about all the servers in server manager + # configuration is returned. + def get_server_status(self): + ret_data = None + self._smgr_log.log(self._smgr_log.DEBUG, "get_server_status") + try: + ret_data = self.validate_smgr_request("SERVER", "GET", + bottle.request) + if ret_data["status"] == 0: + match_key = ret_data["match_key"] + match_value = ret_data["match_value"] + match_dict = {} + if match_key == "tag": + match_dict = self._process_server_tags(match_value) + elif match_key: + match_dict[match_key] = match_value + detail = False + servers = self._serverDb.get_server( + match_dict, detail=detail , + field_list = ["id", "mac_address", "ip_address", "status"]) + except ServerMgrException as e: + self._smgr_trans_log.log(bottle.request, + self._smgr_trans_log.GET_SMGR_CFG_SERVER, False) + abort(404, e.value) + except Exception as e: + self.log_trace() + self._smgr_trans_log.log(bottle.request, + self._smgr_trans_log.GET_SMGR_CFG_SERVER, False) + abort(404, repr(e)) + self._smgr_trans_log.log(bottle.request, + self._smgr_trans_log.GET_SMGR_CFG_SERVER) + # Convert some of the fields in server entry to match what is accepted for put + return {"server": servers} + # end get_server_status + + # This call returns information about a provided server. 
If no server # if provided, information about all the servers in server manager @@ -795,9 +906,14 @@ def get_server(self): if ret_data["status"] == 0: match_key = ret_data["match_key"] match_value = ret_data["match_value"] + match_dict = {} + if match_key == "tag": + match_dict = self._process_server_tags(match_value) + elif match_key: + match_dict[match_key] = match_value detail = ret_data["detail"] - servers = self._serverDb.get_server(match_key, match_value, - detail) + servers = self._serverDb.get_server( + match_dict, detail=detail) except ServerMgrException as e: self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.GET_SMGR_CFG_SERVER, False) @@ -810,6 +926,26 @@ def get_server(self): self._smgr_log.log(self._smgr_log.DEBUG, servers) self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.GET_SMGR_CFG_SERVER) + # Convert some of the fields in server entry to match what is accepted for put + for x in servers: + if x.get("parameters", None) is not None: + x['parameters'] = eval(x['parameters']) + if x.get("roles", None) is not None: + x['roles'] = eval(x['roles']) + if x.get("intf_control", None) is not None: + x['control_data_network'] = eval(x['intf_control']) + x.pop('intf_control', None) + if x.get("intf_bond", None) is not None: + x['bond_interface'] = eval(x['intf_bond']) + x.pop('intf_bond', None) + if detail: + x['tag'] = {} + for i in range(1, len(self._tags_list)+1): + tag = "tag" + str(i) + if x[tag]: + x['tag'][self._tags_dict[tag]] = x.pop(tag, None) + else: + x.pop(tag, None) return {"server": servers} # end get_server @@ -822,9 +958,12 @@ def get_image(self): if ret_data["status"] == 0: match_key = ret_data["match_key"] match_value = ret_data["match_value"] + match_dict = {} + if match_key: + match_dict[match_key] = match_value detail = ret_data["detail"] - images = self._serverDb.get_image(match_key, match_value, - detail) + images = self._serverDb.get_image(match_dict, + detail=detail) except ServerMgrException as e: 
self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.GET_SMGR_CFG_IMAGE, False) @@ -839,47 +978,6 @@ def get_image(self): return {"image": images} # end get_image - def get_email_list(self, email): - email_to = [] - if not email: - return email_to - if email.startswith('[') and email.endswith(']'): - email_to = eval(email) - else: - email_to = [s.strip() for s in email.split(',')] - return email_to - # end get_email_list - - def send_status_mail(self, server_id, event, message): - # Get server entry and find configured e-mail - servers = self._serverDb.get_server("server_id", server_id, True) - if not servers: - msg = "No server found with server_id " + server_id - self._smgr_log.log(self._smgr_log.ERROR, msg) - return - server = servers[0] - email_to = [] - if 'email' in server and server['email']: - email_to = self.get_email_list(server['email']) - else: - # Get VNS entry to find configured e-mail - if 'vns_id' in server and server['vns_id']: - vns_id = server['vns_id'] - vns = self._serverDb.get_vns(vns_id, True) - if vns and 'email' in vns[0] and vns[0]['email']: - email_to = self.get_email_list(vns[0]['email']) - else: - self._smgr_log.log(self._smgr_log.DEBUG, - "vns or server doesn't configured for email") - return - else: - self._smgr_log.log(self._smgr_log.DEBUG, "server not associated with a vns") - return - send_mail(event, message, '', email_to, self._smgr_cobbler._cobbler_ip, '25') - msg = "An email is sent to " + ','.join(email_to) + " with content " + message - self._smgr_log.log(self._smgr_log.DEBUG, msg) - # send_status_mail - def get_obj(self, resp): try: data = json.loads(resp) @@ -893,8 +991,7 @@ def put_status(self): keep_blank_values=True) match_key, match_value = query_args.popitem() if ((match_key not in ( - "server_id", "mac", "cluster_id", - "rack_id", "pod_id", "vns_id", "ip")) or + "server_id", "mac_address", "cluster_id", "ip_address")) or (len(match_value) != 1)): self._smgr_log.log(self._smgr_log.ERROR, "Invalid Query data") 
abort(404, "Invalid Query arguments") @@ -903,7 +1000,7 @@ def put_status(self): server_id = match_value[0] body = bottle.request.body.read() server_data = {} - server_data['server_id'] = server_id + server_data['id'] = server_id server_data['server_status'] = body try: resp = self.get_obj(body) @@ -920,45 +1017,22 @@ def put_status(self): def get_status(self): - server_id = bottle.request.query['server_id'] - servers = self._serverDb.get_status('server_id', - server_id, True) + match_key = match_value = None + match_dict = None + if 'id' in bottle.request.query: + server_id = bottle.request.query['id'] + match_key = 'id' + match_value = server_id + match_dict[match_key] = match_value + + servers = self._serverDb.get_status( + match_dict, detail=True) + if servers: return servers[0] else: return None - def put_cluster(self): - self._smgr_log.log(self._smgr_log.DEBUG, "put_cluster") - entity = bottle.request.json - try: - self.validate_smgr_entity("cluster", entity) - clusters = entity.get('cluster', None) - for cluster in clusters: - if self._serverDb.check_obj("cluster", "cluster_id", - cluster['cluster_id'], False): - self.validate_smgr_request("CLUSTER", "PUT", bottle.request, - cluster, True) - #nothing to do for now - return - else: - self.validate_smgr_request("CLUSTER", "PUT", bottle.request, - cluster) - self._serverDb.add_cluster(cluster) - except ServerMgrException as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.PUT_SMGR_CFG_CLUSTER, False) - abort(404, e.value) - except Exception as e: - self.log_trace() - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.PUT_SMGR_CFG_CLUSTER, False) - abort(404, repr(e)) - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.PUT_SMGR_CFG_CLUSTER) - return entity - - def put_image(self): self._smgr_log.log(self._smgr_log.DEBUG, "add_image") entity = bottle.request.json @@ -967,8 +1041,9 @@ def put_image(self): images = entity.get("image", None) for image in images: #use 
macros for obj type - if self._serverDb.check_obj("image", "image_id", - image['image_id'], False): + if self._serverDb.check_obj( + "image", {"id" : image['id']}, + raise_exception=False): self.validate_smgr_request("IMAGE", "PUT", bottle.request, image, True) @@ -976,11 +1051,11 @@ def put_image(self): else: self.validate_smgr_request("IMAGE", "PUT", bottle.request, image) - image_id = image.get("image_id", None) - image_version = image.get("image_version", None) + image_id = image.get("id", None) + image_version = image.get("version", None) # Get Image type - image_type = image.get("image_type", None) - image_path = image.get("image_path", None) + image_type = image.get("type", None) + image_path = image.get("path", None) if (not image_id) or (not image_path): self._smgr_log.log(self._smgr_log.ERROR, "image id or location not specified") @@ -989,56 +1064,53 @@ def put_image(self): "centos", "fedora", "ubuntu", "contrail-ubuntu-package", "contrail-centos-package", "contrail-storage-ubuntu-package", - "esxi5.5", "esxi5.1"]): + "esxi5.5", "esxi5.1"]): self._smgr_log.log(self._smgr_log.ERROR, "image type not specified or invalid for image %s" %( image_id)) raise ServerMgrException("image type not specified or invalid for image %s" %( image_id)) - #TODO:remove this - ''' - db_images = self._serverDb.get_image( - 'image_id', image_id, False) - if db_images: - self._smgr_log.log(self._smgr_log.ERROR, - "image %s already exists" %( - image_id)) - raise ServerMgrException( - "image %s already exists" %( - image_id)) - ''' if not os.path.exists(image_path): raise ServerMgrException("image not found at %s" % \ (image_path)) extn = os.path.splitext(image_path)[1] - dest = self._args.smgr_base_dir + 'images/' + \ + dest = self._args.server_manager_base_dir + 'images/' + \ image_id + extn - subprocess.call(["cp", "-f", image_path, dest]) + subprocess.check_call(["cp", "-f", image_path, dest]) image_params = {} if ((image_type == "contrail-centos-package") or (image_type == 
"contrail-ubuntu-package") ): - subprocess.call( + subprocess.check_call( ["cp", "-f", dest, - self._args.html_root_dir + "contrail/images"]) + self._args.html_root_dir + "contrail/images/"]) puppet_manifest_version = self._create_repo( image_id, image_type, image_version, dest) image_params['puppet_manifest_version'] = \ puppet_manifest_version elif image_type == "contrail-storage-ubuntu-package": - subprocess.call( + subprocess.check_call( ["cp", "-f", dest, - self._args.html_root_dir + "contrail/images"]) + self._args.html_root_dir + "contrail/images/"]) self._create_repo( image_id, image_type, image_version, dest) else: self._add_image_to_cobbler(image_id, image_type, image_version, dest) image_data = { - 'image_id': image_id, - 'image_version': image_version, - 'image_type': image_type, - 'image_params' : image_params} + 'id': image_id, + 'version': image_version, + 'type': image_type, + 'path': image_path, + 'parameters' : image_params} self._serverDb.add_image(image_data) + except subprocess.CalledProcessError as e: + msg = ("put_image: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) + self._smgr_trans_log.log( + bottle.request, + self._smgr_trans_log.PUT_SMGR_CFG_IMAGE, False) + abort(404, msg) except ServerMgrException as e: self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.PUT_SMGR_CFG_IMAGE, False) @@ -1053,47 +1125,61 @@ def put_image(self): self._smgr_trans_log.PUT_SMGR_CFG_IMAGE) return entity - def put_vns(self): - self._smgr_log.log(self._smgr_log.DEBUG, "put_vns") + def put_cluster(self): + self._smgr_log.log(self._smgr_log.DEBUG, "put_cluster") entity = bottle.request.json try: - self.validate_smgr_entity("vns", entity) - vns = entity.get('vns', None) - for cur_vns in vns: + self.validate_smgr_entity("cluster", entity) + cluster = entity.get('cluster', None) + for cur_cluster in cluster: #use macros for obj type - if self._serverDb.check_obj("vns", "vns_id", - 
cur_vns['vns_id'], False): + if self._serverDb.check_obj( + "cluster", {"id" : cur_cluster['id']}, + raise_exception=False): #TODO Handle uuid here - self.validate_smgr_request("VNS", "PUT", bottle.request, - cur_vns, True) - self._serverDb.modify_vns(cur_vns) + self.validate_smgr_request("CLUSTER", "PUT", bottle.request, + cur_cluster, True) + self._serverDb.modify_cluster(cur_cluster) else: - self.validate_smgr_request("VNS", "PUT", bottle.request, - cur_vns) + self.validate_smgr_request("CLUSTER", "PUT", bottle.request, + cur_cluster) str_uuid = str(uuid.uuid4()) storage_fsid = str(uuid.uuid4()) storage_virsh_uuid = str(uuid.uuid4()) - cur_vns["vns_params"].update({"uuid": str_uuid}) - cur_vns["vns_params"].update({"storage_fsid": storage_fsid}) - cur_vns["vns_params"].update({"storage_virsh_uuid": storage_virsh_uuid}) - self._smgr_log.log(self._smgr_log.INFO, "VNS Data %s" % cur_vns) - self._serverDb.add_vns(cur_vns) + cur_cluster["parameters"].update({"uuid": str_uuid}) + cur_cluster["parameters"].update({"storage_fsid": storage_fsid}) + cur_cluster["parameters"].update({"storage_virsh_uuid": storage_virsh_uuid}) + self._smgr_log.log(self._smgr_log.INFO, "Cluster Data %s" % cur_cluster) + self._serverDb.add_cluster(cur_cluster) except ServerMgrException as e: self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.PUT_SMGR_CFG_VNS, + self._smgr_trans_log.PUT_SMGR_CFG_CLUSTER, False) abort(404, e.value) except Exception as e: self.log_trace() self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.PUT_SMGR_CFG_VNS, + self._smgr_trans_log.PUT_SMGR_CFG_CLUSTER, False) abort(404, repr(e)) self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.PUT_SMGR_CFG_VNS) + self._smgr_trans_log.PUT_SMGR_CFG_CLUSTER) return entity + # Function to validate values of tag field, if present, in received + # server json object. 
+ def validate_server_mgr_tags(self, server): + tags = server.get("tag", None) + if tags is None: + return + for key in tags.iterkeys(): + if key not in self._rev_tags_dict: + msg = "Invalid tag %s in server entry" %( + key) + raise ServerMgrException(msg) + # end validate_server_mgr_tags + def put_server(self): self._smgr_log.log(self._smgr_log.DEBUG, "add_server") entity = bottle.request.json @@ -1103,21 +1189,21 @@ def put_server(self): self.validate_smgr_entity("server", entity) servers = entity.get("server", None) for server in servers: - #commenting out now, untill i find a better way for this code - # self.validate_smgr_request("SERVER", "PUT", bottle.request, - # server) - if self._serverDb.check_obj("server", "server_id", - server['server_id'], False): + self.validate_server_mgr_tags(server) + if self._serverDb.check_obj( + "server", {"id" : server['id']}, + raise_exception=False): #TODO - Revisit this logic # Do we need mac to be primary MAC - server_fields['primary_keys'] = "['server_id']" + server_fields['primary_keys'] = "['id']" self.validate_smgr_request("SERVER", "PUT", bottle.request, server, True) self._serverDb.modify_server(server) - server_fields['primary_keys'] = "['server_id', 'mac']" + server_fields['primary_keys'] = "['id', 'mac_address']" else: self.validate_smgr_request("SERVER", "PUT", bottle.request, server) + server['status'] = "server_added" self._serverDb.add_server(server) except ServerMgrException as e: self._smgr_trans_log.log(bottle.request, @@ -1132,6 +1218,66 @@ def put_server(self): self._smgr_trans_log.PUT_SMGR_CFG_SERVER) return entity + # Function to change tags used for grouping together servers. 
+ def put_server_tags(self): + self._smgr_log.log(self._smgr_log.DEBUG, "add_tag") + entity = bottle.request.json + if (not entity): + abort(404, 'no tags specified') + try: + for key in entity.iterkeys(): + if key not in self._tags_list: + msg = ("Invalid tag %s " + "specified" %(key)) + self._smgr_log.log( + self._smgr_log.ERROR, msg) + raise ServerMgrException(msg) + + for key, value in entity.iteritems(): + current_value = self._tags_dict.get(key, None) + # if tag is defined, then check if new tag name is + # different from old one. + if (current_value and + (value != current_value)): + servers = self._serverDb.get_server( + {}, {key : ''}, detail=False) + if servers: + msg = ( + "Cannot modify tag name " + "for %s, used in server table" %(key)) + self._smgr_log.log( + self._smgr_log.ERROR, msg) + raise ServerMgrException(msg) + + for key, value in entity.iteritems(): + if value: + self._tags_dict[key] = value + self._rev_tags_dict[value] = key + else: + current_value = self._tags_dict.pop(key, None) + self._rev_tags_dict.pop(current_value, None) + # Now write to ini file + tags_config = ConfigParser.SafeConfigParser() + tags_config.add_section('TAGS') + for key, value in self._tags_dict.iteritems(): + tags_config.set('TAGS', key, value) + with open(_SERVER_TAGS_FILE, 'wb') as configfile: + tags_config.write(configfile) + # Also write the tags to DB + self._serverDb.add_server_tags(self._tags_dict) + except ServerMgrException as e: + self._smgr_trans_log.log( + bottle.request, self._smgr_trans_log.PUT_SMGR_CFG_TAG, False) + abort(404, e.value) + except Exception as e: + self.log_trace() + self._smgr_trans_log.log(bottle.request, + self._smgr_trans_log.PUT_SMGR_CFG_TAG, False) + abort(404, repr(e)) + self._smgr_trans_log.log(bottle.request, + self._smgr_trans_log.PUT_SMGR_CFG_TAG) + return self._tags_dict + # end put_server_tags # API Call to add image file to server manager (file is copied at # /images/filename.iso and distro, profile @@ -1139,9 +1285,9 @@ def 
put_server(self): # but this call actually upload ISO image from client to the server. def upload_image(self): self._smgr_log.log(self._smgr_log.DEBUG, "upload_image") - image_id = bottle.request.forms.image_id - image_version = bottle.request.forms.image_version - image_type = bottle.request.forms.image_type + image_id = bottle.request.forms.id + image_version = bottle.request.forms.version + image_type = bottle.request.forms.type if (image_type not in [ "centos", "fedora", "ubuntu", "contrail-ubuntu-package", "contrail-centos-package", "contrail-storage-ubuntu-package"]): @@ -1149,14 +1295,14 @@ def upload_image(self): file_obj = bottle.request.files.file file_name = file_obj.filename db_images = self._serverDb.get_image( - 'image_id', image_id, False) + {'id' : image_id}, detail=False) if db_images: abort( 404, "image %s already exists" %( image_id)) extn = os.path.splitext(file_name)[1] - dest = self._args.smgr_base_dir + 'images/' + \ + dest = self._args.server_manager_base_dir + 'images/' + \ image_id + extn try: if file_obj.file: @@ -1165,31 +1311,61 @@ def upload_image(self): image_params = {} if ((image_type == "contrail-centos-package") or (image_type == "contrail-ubuntu-package")): - subprocess.call( + subprocess.check_call( ["cp", "-f", dest, - self._args.html_root_dir + "contrail/images"]) + self._args.html_root_dir + "contrail/images/"]) puppet_manifest_version = self._create_repo( image_id, image_type, image_version, dest) image_params['puppet_manifest_version'] = \ - puppet_manifest_version + puppet_manifest_version + elif image_type == "contrail-storage-ubuntu-package": + subprocess.check_call(["cp", "-f", dest, + self._args.html_root_dir + "contrail/images/"]) + self._create_repo( + image_id, image_type, image_version, dest) else: self._add_image_to_cobbler(image_id, image_type, image_version, dest) image_data = { - 'image_id': image_id, - 'image_version': image_version, - 'image_type': image_type, - 'image_params' : image_params} + 'id': 
image_id, + 'version': image_version, + 'type': image_type, + 'path': dest, + 'parameters' : image_params} self._serverDb.add_image(image_data) + except subprocess.CalledProcessError as e: + msg = ("upload_image: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) + self._smgr_trans_log.log( + bottle.request, + self._smgr_trans_log.PUT_SMGR_CFG_IMAGE, False) + abort(404, msg) except Exception as e: self.log_trace() abort(404, repr(e)) + #TODO use the below method to return a JSON for all operations commands + #with status, Move the codes and msg to a seprate file + entity = {} + new_entity = self._add_return_status(entity, 0, "Image Uploaded") + return new_entity # End of upload_image + + #menthod to add status code and msg for json to be returned. + def _add_return_status(self, entity, code, msg): + status = {} + status['code'] = code + status['message'] = msg + entity['status'] = status + return entity + + #End of _add_return_status + # The below function takes the tgz path for puppet modules in the repo # being added, checks if that version of modules is already added to # puppet and adds it if not already added. - def _add_puppet_modules(self, puppet_modules_tgz): + def _add_puppet_modules(self, puppet_modules_tgz, image_id): tmpdirname = tempfile.mkdtemp() try: # change dir to the temp dir created @@ -1197,13 +1373,20 @@ def _add_puppet_modules(self, puppet_modules_tgz): os.chdir(tmpdirname) # Copy the tgz to tempdir cmd = ("cp -f %s ." %(puppet_modules_tgz)) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # untar the puppet modules tgz file cmd = ("tar xvzf contrail-puppet-manifest.tgz > /dev/null") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) + # Changing the below logic. Instead of reading version from + # version file, use image id as the version. 
Image id is unique + # and hence it would be easy to correlate puppet modules to + # contrail package being added. With this change, every image would + # have it's own manifests, even though manifests between two contrail + # versions might be identical. # Extract contents of version file. - with open('version','r') as f: - version = f.read().splitlines()[0] + #with open('version','r') as f: + # version = f.read().splitlines()[0] + version = image_id # Create modules directory if it does not exist. target_dir = "/etc/puppet/modules/contrail_" + version if not os.path.isdir(target_dir): @@ -1216,21 +1399,36 @@ def _add_puppet_modules(self, puppet_modules_tgz): os.makedirs("/etc/puppet/modules/stdlib") # This contrail puppet modules version does not exist. Add it. cmd = ("cp -rf ./contrail/* " + target_dir) - subprocess.call(cmd, shell=True) - cmd = ("cp -rf ./inifile/* " + "/etc/puppet/modules/inifile") - subprocess.call(cmd, shell=True) - cmd = ("cp -rf ./ceph/* " + "/etc/puppet/modules/ceph") - subprocess.call(cmd, shell=True) - cmd = ("cp -rf ./stdlib/* " + "/etc/puppet/modules/stdlib") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) + if os.path.isdir("./inifile"): + cmd = ("cp -rf ./inifile/* " + "/etc/puppet/modules/inifile") + subprocess.check_call(cmd, shell=True) + else: + self._smgr_log.log(self._smgr_log.ERROR, "directory inifile not in source tar ball - not copied") + if os.path.isdir("./ceph"): + cmd = ("cp -rf ./ceph/* " + "/etc/puppet/modules/ceph") + subprocess.check_call(cmd, shell=True) + else: + self._smgr_log.log(self._smgr_log.ERROR, "directory ceph not in source tar ball - not copied") + if os.path.isdir("./stdlib"): + cmd = ("cp -rf ./stdlib/* " + "/etc/puppet/modules/stdlib") + subprocess.check_call(cmd, shell=True) + else: + self._smgr_log.log(self._smgr_log.ERROR, "directory stdlib not in source tar ball - not copied") # Replace the class names in .pp files to have the version number # of this contrail modules. 
filelist = target_dir + "/manifests/*.pp" cmd = ("sed -i \"s/__\$version__/contrail_%s/g\" %s" %( version, filelist)) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) os.chdir(cwd) return version + except subprocess.CalledProcessError as e: + shutil.rmtree(tmpdirname) # delete directory + msg = ("add_puppet_modules: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) + raise ServerMgrException(msg) finally: try: shutil.rmtree(tmpdirname) # delete directory @@ -1248,48 +1446,53 @@ def _create_yum_repo( # create a repo-dir where we will create the repo mirror = self._args.html_root_dir+"contrail/repo/"+image_id cmd = "mkdir -p %s" %(mirror) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # change directory to the new one created cwd = os.getcwd() os.chdir(mirror) # add wrapper package itself to the repo cmd = "cp -f %s %s" %( dest, mirror) - subprocess.call(cmd, shell=True) - # Extract .tgz of contrail puppet manifest files + subprocess.check_call(cmd, shell=True) + # Extract .tgz of contrail puppet manifest files cmd = ( "rpm2cpio %s | cpio -ivd ./opt/contrail/puppet/" "contrail-puppet-manifest.tgz > /dev/null" %(dest)) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # Handle the puppet manifests in this package. 
puppet_modules_tgz_path = mirror + \ "/opt/contrail/puppet/contrail-puppet-manifest.tgz" puppet_manifest_version = self._add_puppet_modules( - puppet_modules_tgz_path) + puppet_modules_tgz_path, image_id) # Extract .tgz of other packages from the repo cmd = ( "rpm2cpio %s | cpio -ivd ./opt/contrail/contrail_packages/" "contrail_rpms.tgz > /dev/null" %(dest)) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) cmd = ("mv ./opt/contrail/contrail_packages/contrail_rpms.tgz .") subprocess.call(cmd, shell=True) cmd = ("rm -rf opt") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # untar tgz to get all packages cmd = ("tar xvzf contrail_rpms.tgz > /dev/null") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # remove the tgz file itself, not needed any more cmd = ("rm -f contrail_rpms.tgz") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # build repo using createrepo cmd = ("createrepo . > /dev/null") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # change directory back to original os.chdir(cwd) # cobbler add repo self._smgr_cobbler.create_repo( image_id, mirror) return puppet_manifest_version + except subprocess.CalledProcessError as e: + msg = ("create_yum_repo: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) + raise ServerMgrException(msg) except Exception as e: raise(e) # end _create_yum_repo @@ -1304,37 +1507,37 @@ def _create_deb_repo( # create a repo-dir where we will create the repo mirror = self._args.html_root_dir+"contrail/repo/"+image_id cmd = "mkdir -p %s" %(mirror) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # change directory to the new one created cwd = os.getcwd() os.chdir(mirror) # add wrapper package itself to the repo cmd = "cp -f %s %s" %( dest, mirror) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, 
shell=True) # Extract .tgz of other packages from the repo cmd = ( "dpkg -x %s . > /dev/null" %(dest)) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # Handle the puppet manifests in this package. puppet_modules_tgz_path = mirror + \ "/opt/contrail/puppet/contrail-puppet-manifest.tgz" puppet_manifest_version = self._add_puppet_modules( - puppet_modules_tgz_path) + puppet_modules_tgz_path, image_id) cmd = ("mv ./opt/contrail/contrail_packages/contrail_debs.tgz .") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) cmd = ("rm -rf opt") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # untar tgz to get all packages cmd = ("tar xvzf contrail_debs.tgz > /dev/null") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # remove the tgz file itself, not needed any more cmd = ("rm -f contrail_debs.tgz") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # build repo using createrepo cmd = ( "dpkg-scanpackages . 
/dev/null | gzip -9c > Packages.gz") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # change directory back to original os.chdir(cwd) # cobbler add repo @@ -1343,6 +1546,11 @@ def _create_deb_repo( # self._smgr_cobbler.create_repo( # image_id, mirror) return puppet_manifest_version + except subprocess.CalledProcessError as e: + msg = ("create_deb_repo: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) + raise ServerMgrException(msg) except Exception as e: raise(e) # end _create_deb_repo @@ -1356,32 +1564,32 @@ def _create_storage_deb_repo( # create a repo-dir where we will create the repo mirror = self._args.html_root_dir+"contrail/repo/"+image_id cmd = "mkdir -p %s" %(mirror) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # change directory to the new one created cwd = os.getcwd() os.chdir(mirror) # add wrapper package itself to the repo cmd = "cp -f %s %s" %( dest, mirror) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # Extract .tgz of other packages from the repo cmd = ( "dpkg -x %s . > /dev/null" %(dest)) - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) cmd = ("mv ./opt/contrail/contrail_packages/contrail_storage_debs.tgz .") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) cmd = ("rm -rf opt") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # untar tgz to get all packages cmd = ("tar xvzf contrail_storage_debs.tgz > /dev/null") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # remove the tgz file itself, not needed any more cmd = ("rm -f contrail_storage_debs.tgz") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # build repo using createrepo cmd = ( "dpkg-scanpackages . 
/dev/null | gzip -9c > Packages.gz") - subprocess.call(cmd, shell=True) + subprocess.check_call(cmd, shell=True) # change directory back to original os.chdir(cwd) # cobbler add repo @@ -1389,6 +1597,11 @@ def _create_storage_deb_repo( # will need to revisit and make it work for ubuntu - Abhay # self._smgr_cobbler.create_repo( # image_id, mirror) + except subprocess.CalledProcessError as e: + msg = ("create_storage_deb_repo: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) + raise ServerMgrException(msg) except Exception as e: raise(e) # end _create_storage_deb_repo @@ -1467,7 +1680,7 @@ def _add_image_to_cobbler(self, image_id, image_type, self._smgr_log.log(self._smgr_log.ERROR, "Invalid image type") abort(404, "invalid image type") self._mount_and_copy_iso(dest, copy_path, distro_name, - kernel_file, initrd_file) + kernel_file, initrd_file, image_type) # Setup distro information in cobbler self._smgr_cobbler.create_distro( distro_name, image_type, @@ -1498,8 +1711,10 @@ def delete_cluster(self): if ret_data["status"] == 0: match_key = ret_data["match_key"] match_value = ret_data["match_value"] - - self._serverDb.delete_cluster(match_value) + match_dict = {} + if match_key: + match_dict[match_key] = match_value + self._serverDb.delete_cluster(match_dict) except ServerMgrException as e: self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.DELETE_SMGR_CFG_CLUSTER, @@ -1514,41 +1729,10 @@ def delete_cluster(self): "Error while deleting cluster %s" % (repr(e))) abort(404, repr(e)) self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.DELETE_SMGR_CFG_CLUSTER) - return "Cluster deleted" + self._smgr_trans_log.DELETE_SMGR_CFG_CLUSTER) + return "CLUSTER deleted" # end delete_cluster - # API call to delete a vns from server manager config. Along with - # vns, all servers in that vns and associated roles are also - # deleted. 
- def delete_vns(self): - self._smgr_log.log(self._smgr_log.DEBUG, "delete_vns") - try: - ret_data = self.validate_smgr_request("VNS", "DELETE", - bottle.request) - if ret_data["status"] == 0: - match_key = ret_data["match_key"] - match_value = ret_data["match_value"] - force = ret_data["force"] - self._serverDb.delete_vns(match_value, force) - except ServerMgrException as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.DELETE_SMGR_CFG_VNS, - False) - abort(404, e.value) - except Exception as e: - self.log_trace() - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.DELETE_SMGR_CFG_VNS, - False) - self._smgr_log.log(self._smgr_log.ERROR, - "Error while deleting vns %s" % (repr(e))) - abort(404, repr(e)) - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.DELETE_SMGR_CFG_VNS) - return "VNS deleted" - # end delete_vns - # API call to delete a server from the configuration. def delete_server(self): self._smgr_log.log(self._smgr_log.DEBUG, "delete_server") @@ -1559,20 +1743,18 @@ def delete_server(self): if ret_data["status"] == 0: match_key = ret_data["match_key"] match_value = ret_data["match_value"] + match_dict = {} + if match_key == "tag": + match_dict = self._process_server_tags(match_value) + elif match_key: + match_dict[match_key] = match_value - servers = self._serverDb.get_server(match_key, match_value, False) - ''' - if not servers: - msg = "No Server found for match key %s" % \ - (match_key) - self._smgr_log.log(self._smgr_log.ERROR, - msg ) - raise ServerMgrException(msg) - ''' - self._serverDb.delete_server(match_key, match_value) + servers = self._serverDb.get_server( + match_dict, detail= False) + self._serverDb.delete_server(match_dict) # delete the system entries from cobbler for server in servers: - self._smgr_cobbler.delete_system(server['server_id']) + self._smgr_cobbler.delete_system(server['id']) # Sync the above information self._smgr_cobbler.sync() except ServerMgrException as e: @@ -1597,28 
+1779,30 @@ def delete_server(self): def delete_image(self): self._smgr_log.log(self._smgr_log.DEBUG, "delete_image") try: - image_id = bottle.request.query.image_id + image_id = bottle.request.query.id if not image_id: msg = "Image Id not specified" raise ServerMgrException(msg) - images = self._serverDb.get_image("image_id", image_id, True) + image_dict = {"id" : image_id} + images = self._serverDb.get_image(image_dict, detail=True) if not images: msg = "Image %s doesn't exist" % (image_id) raise ServerMgrException(msg) self._smgr_log.log(self._smgr_log.ERROR, msg) image = images[0] - if ((image['image_type'] == 'contrail-ubuntu-package') or - (image['image_type'] == 'contrail-centos-package')): + if ((image['type'] == 'contrail-ubuntu-package') or + (image['type'] == 'contrail-centos-package') or + (image['type'] == 'contrail-storage-ubuntu-package')): ext_dir = { "contrail-ubuntu-package" : ".deb", - "contrail-centos-package": ".rpm" } - # remove the file - os.remove(self._args.smgr_base_dir + 'images/' + - image_id + ext_dir[image['image_type']]) + "contrail-centos-package": ".rpm", + "contrail-storage-ubuntu-package": ".deb"} + os.remove(self._args.server_manager_base_dir + 'images/' + + image_id + ext_dir[image['type']]) os.remove(self._args.html_root_dir + 'contrail/images/' + - image_id + ext_dir[image['image_type']]) + image_id + ext_dir[image['type']]) # remove repo dir shutil.rmtree( @@ -1632,14 +1816,14 @@ def delete_image(self): # Sync the above information self._smgr_cobbler.sync() # remove the file - os.remove(self._args.smgr_base_dir + 'images/' + + os.remove(self._args.server_manager_base_dir + 'images/' + image_id + '.iso') # Remove the tree copied under cobbler. 
dir_path = self._args.html_root_dir + \ 'contrail/images/' + image_id shutil.rmtree(dir_path, True) # remove the entry from DB - self._serverDb.delete_image(image_id) + self._serverDb.delete_image(image_dict) except ServerMgrException as e: self.log_trace() self._smgr_trans_log.log(bottle.request, @@ -1655,98 +1839,9 @@ def delete_image(self): abort(404, repr(e)) self._smgr_trans_log.log(bottle.request, self._smgr_trans_log.DELETE_SMGR_CFG_IMAGE) - return "Server Deleted" + return "Image Deleted" # End of delete_image - # API to modify parameters for a server. User can modify IP, MAC, cluster - # name (moving the server to a different cluster) , roles configured on - # the server, or server parameters. - def modify_server(self): - self._smgr_log.log(self._smgr_log.DEBUG, "modify_server") - entity = bottle.request.json - try: - self.validate_smgr_entity("server", entity) - servers = entity.get("server", None) - for server in servers: - self.validate_smgr_request("SERVER", "MODIFY", bottle.request, - server) - - self._serverDb.modify_server(server) - except ServerMgrException as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_SERVER, - False) - abort(404, e.value) - except Exception as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_SERVER, - False) - abort(404, repr(e)) - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_SERVER) - return entity - # end modify_server - - # API to modify parameters for a VNS. 
- def modify_vns(self): - self._smgr_log.log(self._smgr_log.DEBUG, "modify_vns") - entity = bottle.request.json - try: - self.validate_smgr_entity("vns", entity) - vns_list = entity.get("vns", None) - for vns in vns_list: - self.validate_smgr_request("VNS", "MODIFY", bottle.request, - vns) - self._serverDb.modify_vns(vns) - except ServerMgrException as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_VNS, - False) - abort(404, e.value) - except Exception as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_VNS, - False) - - abort(404, repr(e)) - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_VNS) - return entity - # end modify_vns - - # API to modify parameters for an image. - def modify_image(self): - self._smgr_log.log(self._smgr_log.DEBUG, "modify_image") - entity = bottle.request.json - try: - self.validate_smgr_entity("image", entity) - images = entity.get("image", None) - for image in images: - self.validate_smgr_request("IMAGE", "MODIFY", bottle.request, - image) - self._serverDb.modify_image(image) - except ServerMgrException as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_IMAGE, - False) - abort(404, e.value) - except Exception as e: - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_IMAGE, - False) - abort(404, repr(e)) - self._smgr_trans_log.log(bottle.request, - self._smgr_trans_log.MODIFY_SMGR_CFG_IMAGE) - - return entity - # end modify_image - - # API to modify parameters for a cluster. Currently no-op, but code - # will be added later to change other cluster parameters. - def modify_cluster(self): - return - # end modify_cluster - # API to create the server manager configuration DB from provided JSON # file. 
def create_server_mgr_config(self): @@ -1790,119 +1885,123 @@ def reimage_server(self): package_image_id = ret_data['package_image_id'] match_key = ret_data['match_key'] match_value = ret_data['match_value'] + match_dict = {} + if match_key == "tag": + match_dict = self._process_server_tags(match_value) + elif match_key: + match_dict[match_key] = match_value do_reboot = ret_data['do_reboot'] reboot_server_list = [] - images = self._serverDb.get_image("image_id", base_image_id, True) - packages = self._serverDb.get_image("image_id", package_image_id, True) + images = self._serverDb.get_image( + {"id" : base_image_id}, detail=True) if len(images) == 0: msg = "No Image %s found" % (base_image_id) raise ServerMgrException(msg) - if ( images[0] ['image_type'] not in iso_types ): + if ( images[0] ['type'] not in iso_types ): msg = "Image %s is not an iso" % (base_image_id) raise ServerMgrException(msg) - if len(packages) == 0: - msg = "No Package %s found" % (package_image_id) - raise ServerMgrException(msg) base_image = images[0] servers = self._serverDb.get_server( - match_key, match_value, detail=True) + match_dict, detail=True) if len(servers) == 0: msg = "No Servers found for %s" % (match_value) raise ServerMgrException(msg) for server in servers: - vns = None - server_params = eval(server['server_params']) + cluster = None + server_parameters = eval(server['parameters']) # build all parameters needed for re-imaging - if server['vns_id']: - vns = self._serverDb.get_vns(server['vns_id'], - detail=True) - vns_params = {} - if vns and vns[0]['vns_params']: - vns_params = eval(vns[0]['vns_params']) - - passwd = mask = gway = domain = None - server_id = server['server_id'] - if 'passwd' in server and server['passwd']: - passwd = server['passwd'] - elif 'passwd' in vns_params and vns_params['passwd']: - passwd = vns_params['passwd'] + if server['cluster_id']: + cluster = self._serverDb.get_cluster( + {"id" : server['cluster_id']}, + detail=True) + cluster_parameters = {} 
+ if cluster and cluster[0]['parameters']: + cluster_parameters = eval(cluster[0]['parameters']) + + password = mask = gateway = domain = None + server_id = server['id'] + if 'password' in server and server['password']: + password = server['password'] + elif 'password' in cluster_parameters and cluster_parameters['password']: + password = cluster_parameters['password'] else: msg = "Missing Password for " + server_id raise ServerMgrException(msg) - if 'mask' in server and server['mask']: - mask = server['mask'] - elif 'mask' in vns_params and vns_params['mask']: - mask = vns_params['mask'] + if 'subnet_mask' in server and server['subnet_mask']: + subnet_mask = server['subnet_mask'] + elif 'subnet_mask' in cluster_parameters and cluster_parameters['subnet_mask']: + subnet_mask = cluster_parameters['subnet_mask'] else: msg = "Missing prefix/mask for " + server_id raise ServerMgrException(msg) - if 'gway' in server and server['gway']: - gway = server['gway'] - elif 'gway' in vns_params and vns_params['gway']: - gway = vns_params['gway'] + if 'gateway' in server and server['gateway']: + gateway = server['gateway'] + elif 'gateway' in cluster_parameters and cluster_parameters['gateway']: + gateway = cluster_parameters['gateway'] else: msg = "Missing gateway for " + server_id raise ServerMgrException(msg) if 'domain' in server and server['domain']: domain = server['domain'] - elif 'domain' in vns_params and vns_params['domain']: - domain = vns_params['domain'] + elif 'domain' in cluster_parameters and cluster_parameters['domain']: + domain = cluster_parameters['domain'] else: msg = "Missing domain for " + server_id raise ServerMgrException(msg) - if 'ip' in server and server['ip']: - ip = server['ip'] + if 'ip_address' in server and server['ip_address']: + ip = server['ip_address'] else: msg = "Missing ip for " + server_id raise ServerMgrException(msg) - reimage_params = {} - if ((base_image['image_type'] == 'esxi5.1') or - (base_image['image_type'] == 'esxi5.5')): - 
reimage_params['server_license'] = server_params.get( + reimage_parameters = {} + if ((base_image['type'] == 'esxi5.1') or + (base_image['type'] == 'esxi5.5')): + reimage_parameters['server_license'] = server_parameters.get( 'server_license', '') - reimage_params['esx_nicname'] = server_params.get( + reimage_parameters['esx_nicname'] = server_parameters.get( 'esx_nicname', 'vmnic0') - reimage_params['server_id'] = server['server_id'] - reimage_params['server_ip'] = server['ip'] - reimage_params['server_mac'] = server['mac'] - reimage_params['server_passwd'] = self._encrypt_passwd( - passwd) - reimage_params['server_mask'] = mask - reimage_params['server_gway'] = gway - reimage_params['server_domain'] = domain - if 'ifname' not in server_params: - msg = "Missing ifname for " + server_id + reimage_parameters['server_id'] = server['id'] + reimage_parameters['server_ip'] = server['ip_address'] + reimage_parameters['server_mac'] = server['mac_address'] + reimage_parameters['server_password'] = self._encrypt_password( + password) + reimage_parameters['server_mask'] = subnet_mask + reimage_parameters['server_gateway'] = gateway + reimage_parameters['server_domain'] = domain + if 'interface_name' not in server_parameters: + msg = "Missing interface name for " + server_id raise ServerMgrException(msg) - if 'power_address' in server and server['power_address'] == None: - msg = "Missing power address for " + server_id + if 'ipmi_addresss' in server and server['ipmi_addresss'] == None: + msg = "Missing ipmi address for " + server_id raise ServerMgrException(msg) - reimage_params['server_ifname'] = server_params['ifname'] - reimage_params['power_type'] = server.get('power_type') - if not reimage_params['power_type']: - reimage_params['power_type'] = self._args.power_type - reimage_params['power_user'] = server.get('power_user') - if not reimage_params['power_user']: - reimage_params['power_user'] = self._args.power_user - reimage_params['power_pass'] = server.get('power_pass') 
- if not reimage_params['power_pass']: - reimage_params['power_pass'] = self._args.power_pass - reimage_params['power_address'] = server.get( - 'power_address', '') + reimage_parameters['server_ifname'] = server_parameters['interface_name'] + reimage_parameters['ipmi_type'] = server.get('ipmi_type') + if not reimage_parameters['ipmi_type']: + reimage_parameters['ipmi_type'] = self._args.ipmi_type + reimage_parameters['ipmi_username'] = server.get('ipmi_username') + if not reimage_parameters['ipmi_username']: + reimage_parameters['ipmi_username'] = self._args.ipmi_username + reimage_parameters['ipmi_password'] = server.get('ipmi_password') + if not reimage_parameters['ipmi_password']: + reimage_parameters['ipmi_password'] = self._args.ipmi_password + reimage_parameters['ipmi_address'] = server.get( + 'ipmi_address', '') + reimage_parameters['partition'] = server_parameters.get('partition', '') self._do_reimage_server( - base_image, package_image_id, reimage_params) + base_image, package_image_id, reimage_parameters) # Build list of servers to be rebooted. 
reboot_server = { - 'server_id' : server['server_id'], + 'id' : server['id'], 'domain' : domain, - 'ip' : server.get("ip", ""), - 'passwd' : passwd, - 'power_address' : server.get('power_address',"") } + 'ip' : server.get("ip_address", ""), + 'password' : password, + 'ipmi_address' : server.get('ipmi_address',"") } reboot_server_list.append( reboot_server) # end for server in servers @@ -1925,8 +2024,8 @@ def reimage_server(self): self._smgr_trans_log.SMGR_REIMAGE, False) print 'Exception error is: %s' % e - abort(404, "Error in upgrading Server") - return "server(s) upgraded" + abort(404, "Error in reimaging the Server") + return "server(s) reimage issued" # end reimage_server # API call to power-cycle the server (IMPI Interface) @@ -1941,45 +2040,52 @@ def restart_server(self): do_net_boot = ret_data['net_boot'] match_key = ret_data['match_key'] match_value = ret_data['match_value'] + match_dict = {} + if match_key == "tag": + match_dict = self._process_server_tags(match_value) + elif match_key: + match_dict[match_key] = match_value reboot_server_list = [] + # if the key is server_id, server_table server key is 'id' servers = self._serverDb.get_server( - match_key, match_value, detail=True) + match_dict, detail=True) if len(servers) == 0: msg = "No Servers found for match %s" % \ (match_value) raise ServerMgrException(msg) for server in servers: - vns = None - #if its None,It gets the VNS list - if server['vns_id']: - vns = self._serverDb.get_vns(server['vns_id'], - detail=True) - vns_params = {} - if vns and vns[0]['vns_params']: - vns_params = eval(vns[0]['vns_params']) - - server_id = server['server_id'] - if 'passwd' in server: - passwd = server['passwd'] - elif 'passwd' in vns_params: - passwd = vns_params['passwd'] + cluster = None + #if its None,It gets the CLUSTER list + if server['cluster_id']: + cluster = self._serverDb.get_cluster( + {"id" : server['cluster_id']}, + detail=True) + cluster_parameters = {} + if cluster and cluster[0]['parameters']: + 
cluster_parameters = eval(cluster[0]['parameters']) + + server_id = server['id'] + if 'password' in server: + password = server['password'] + elif 'password' in cluster_parameters: + password = cluster_parameters['password'] else: abort(404, "Missing password for " + server_id) if 'domain' in server and server['domain']: domain = server['domain'] - elif 'domain' in vns_params and vns_params['domain']: - domain = vns_params['domain'] + elif 'domain' in cluster_parameters and cluster_parameters['domain']: + domain = cluster_parameters['domain'] else: abort(404, "Missing Domain for " + server_id) # Build list of servers to be rebooted. reboot_server = { - 'server_id' : server['server_id'], + 'id' : server['id'], 'domain' : domain, - 'ip' : server.get("ip", ""), - 'passwd' : passwd, - 'power_address' : server.get('power_address',"") } + 'ip' : server.get("ip_address", ""), + 'password' : password, + 'ipmi_address' : server.get('ipmi_address',"") } reboot_server_list.append( reboot_server) # end for server in servers @@ -2003,10 +2109,10 @@ def restart_server(self): return status_msg # end restart_server - # Function to get all servers in a VNS configured for given role. - def role_get_servers(self, vns_servers, role_type): + # Function to get all servers in a Cluster configured for given role. 
+ def role_get_servers(self, cluster_servers, role_type): servers = [] - for server in vns_servers: + for server in cluster_servers: role_set = set(eval(server['roles'])) if role_type in role_set: servers.append(server) @@ -2014,14 +2120,14 @@ def role_get_servers(self, vns_servers, role_type): #Function to get control section for all servers # belonging to the same VN - def get_control_net(self, vns_servers): + def get_control_net(self, cluster_servers): server_control_list = {} - for server in vns_servers: + for server in cluster_servers: if 'intf_control' not in server: intf_control = "" else: intf_control = server['intf_control'] - server_control_list[server['ip']] = intf_control + server_control_list[server['ip_address']] = intf_control return server_control_list # Function to get map server name to server ip @@ -2031,9 +2137,9 @@ def get_server_ip_list(self, server_names, servers): server_ips = [] for server_name in server_names: for server in servers: - if server['server_id'] == server_name: + if server['id'] == server_name: server_ips.append( - server['server_ip']) + server['ip_address']) break # end if # end for server @@ -2052,9 +2158,9 @@ def log_trace(self): if not exc_type or not exc_value or not exc_traceback: return self._smgr_log.log(self._smgr_log.DEBUG, "*****TRACEBACK-START*****") - tb_lines = traceback.format_exception(exc_type, exc_value, + tb_lines = traceback.format_exception(exc_type, exc_value, exc_traceback) - for tb_line in tb_lines: + for tb_line in tb_lines: self._smgr_log.log(self._smgr_log.DEBUG, tb_line) self._smgr_log.log(self._smgr_log.DEBUG, "*****TRACEBACK-END******") @@ -2075,7 +2181,7 @@ def log_trace(self): # puppet manifest file for the server and adds it to site # manifest file. 
def provision_server(self): - package_type_list = ["contrail-ubuntu-package", "contrail-centos-package"] + package_type_list = ["contrail-ubuntu-package", "contrail-centos-package", "contrail-storage-ubuntu-package"] self._smgr_log.log(self._smgr_log.DEBUG, "provision_server") try: entity = bottle.request.json @@ -2094,46 +2200,49 @@ def provision_server(self): msg = "Error validating request" raise ServerMgrException(msg) - # Calculate the total number of disks in the vns + # Calculate the total number of disks in the cluster total_osd = int(0) - + num_storage_hosts = int(0) for server in servers: - server_params = eval(server['server_params']) + server_params = eval(server['parameters']) server_roles = eval(server['roles']) - if 'storage' in server_roles and 'disks' in server_params: - total_osd += len(server_params['disks']) + if 'storage-compute' in server_roles: + if 'disks' in server_params and len(server_params['disks']) > 0: + total_osd += len(server_params['disks']) + num_storage_hosts += 1 else: pass - packages = self._serverDb.get_image("image_id", package_image_id, True) + packages = self._serverDb.get_image( + {"id" : package_image_id}, detail=True) if len(packages) == 0: msg = "No Package %s found" % (package_image_id) raise ServerMgrException(msg) - package_type = packages[0] ['image_type'] + package_type = packages[0] ['type'] for server in servers: - server_params = eval(server['server_params']) - vns = self._serverDb.get_vns(server['vns_id'], - detail=True)[0] - - vns_params = eval(vns['vns_params']) - # Get all the servers belonging to the VNS that this server + server_params = eval(server['parameters']) + cluster = self._serverDb.get_cluster( + {"id" : server['cluster_id']}, + detail=True)[0] + cluster_params = eval(cluster['parameters']) + # Get all the servers belonging to the CLUSTER that this server # belongs too. 
- vns_servers = self._serverDb.get_server( - match_key="vns_id", match_value=server["vns_id"], + cluster_servers = self._serverDb.get_server( + {"cluster_id" : server["cluster_id"]}, detail="True") - # build roles dictionary for this vns. Roles dictionary will be + # build roles dictionary for this cluster. Roles dictionary will be # keyed by role-id and value would be list of servers configured # with this role. if not role_servers: for role in ['database', 'openstack', 'config', 'control', 'collector', 'webui', - 'compute', 'storage', 'storage-mgr']: + 'compute', 'storage-compute', 'storage-master']: role_servers[role] = self.role_get_servers( - vns_servers, role) - role_ips[role] = [x["ip"] for x in role_servers[role]] - role_ids[role] = [x["server_id"] for x in role_servers[role]] + cluster_servers, role) + role_ips[role] = [x["ip_address"] for x in role_servers[role]] + role_ids[role] = [x["id"] for x in role_servers[role]] provision_params = {} #TODO there is no need for image related stuff within the for @@ -2142,31 +2251,31 @@ def provision_server(self): provision_params['package_type'] = package_type # Get puppet manifest version corresponding to this package_image_id images = self._serverDb.get_image( - "image_id", package_image_id, True) + {"id" : package_image_id}, detail=True) if not len(images): msg = "Package %s not present" % (package_image_id) self._smgr_log.log(self._smgr_log.DEBUG, msg) raise ServerMgrException(msg) image = images [0] - if image['image_type'] not in package_type_list: + if image['type'] not in package_type_list: msg = "Package %s is not a valid package." 
% (package_image_id) self._smgr_log.log(self._smgr_log.DEBUG, msg) raise ServerMgrException(msg) - puppet_manifest_version = eval(image['image_params'])['puppet_manifest_version'] + puppet_manifest_version = eval(image['parameters'])['puppet_manifest_version'] provision_params['puppet_manifest_version'] = puppet_manifest_version provision_params['server_mgr_ip'] = self._args.listen_ip_addr provision_params['roles'] = role_ips provision_params['role_ids'] = role_ids - provision_params['server_id'] = server['server_id'] + provision_params['server_id'] = server['id'] if server['domain']: provision_params['domain'] = server['domain'] else: - provision_params['domain'] = vns_params['domain'] + provision_params['domain'] = cluster_params['domain'] provision_params['rmq_master'] = role_ids['config'][0] - provision_params['uuid'] = vns_params['uuid'] + provision_params['uuid'] = cluster_params['uuid'] provision_params['smgr_ip'] = self._args.listen_ip_addr - if role_ids['config'][0] == server['server_id']: + if role_ids['config'][0] == server['id']: provision_params['is_rmq_master'] = "yes" else: provision_params['is_rmq_master'] = "no" @@ -2179,26 +2288,82 @@ def provision_server(self): provision_params['intf_data'] = server['intf_data'] if 'intf_bond' in server: provision_params['intf_bond'] = server['intf_bond'] - provision_params['control_net'] = self.get_control_net(vns_servers) - provision_params['server_ip'] = server['ip'] - provision_params['database_dir'] = vns_params['database_dir'] - provision_params['db_initial_token'] = vns_params['db_initial_token'] - provision_params['openstack_mgmt_ip'] = vns_params['openstack_mgmt_ip'] - provision_params['use_certs'] = vns_params['use_certs'] - provision_params['multi_tenancy'] = vns_params['multi_tenancy'] - provision_params['router_asn'] = vns_params['router_asn'] - provision_params['encap_priority'] = vns_params['encap_priority'] - provision_params['service_token'] = vns_params['service_token'] - 
provision_params['ks_user'] = vns_params['ks_user'] - provision_params['ks_passwd'] = vns_params['ks_passwd'] - provision_params['ks_tenant'] = vns_params['ks_tenant'] - provision_params['openstack_passwd'] = vns_params['openstack_passwd'] - provision_params['analytics_data_ttl'] = vns_params['analytics_data_ttl'] - provision_params['phy_interface'] = server_params['ifname'] - provision_params['compute_non_mgmt_ip'] = server_params['compute_non_mgmt_ip'] - provision_params['compute_non_mgmt_gway'] = server_params['compute_non_mgmt_gway'] - provision_params['server_gway'] = server['gway'] - provision_params['haproxy'] = vns_params['haproxy'] + provision_params['control_net'] = self.get_control_net(cluster_servers) + provision_params['server_ip'] = server['ip_address'] + provision_params['database_dir'] = cluster_params['database_dir'] + provision_params['database_token'] = cluster_params['database_token'] + provision_params['openstack_mgmt_ip'] = '' + provision_params['openstack_passwd'] = '' + provision_params['use_certificates'] = cluster_params['use_certificates'] + provision_params['multi_tenancy'] = cluster_params['multi_tenancy'] + provision_params['router_asn'] = cluster_params['router_asn'] + provision_params['encapsulation_priority'] = cluster_params['encapsulation_priority'] + provision_params['service_token'] = cluster_params['service_token'] + provision_params['keystone_username'] = cluster_params['keystone_username'] + provision_params['keystone_password'] = cluster_params['keystone_password'] + provision_params['keystone_tenant'] = cluster_params['keystone_tenant'] + provision_params['analytics_data_ttl'] = cluster_params['analytics_data_ttl'] + provision_params['phy_interface'] = server_params['interface_name'] + #TODO write a function which gets from server/cluster json + provision_params['password'] = server['password'] + provision_params['internal_vip'] = cluster_params['internal_vip'] + provision_params['external_vip'] = 
cluster_params['external_vip'] + provision_params['nfs_server'] = cluster_params['nfs_server'] + provision_params['contrail_internal_vip'] = \ + cluster_params['contrail_internal_vip'] + provision_params['contrail_external_vip'] = \ + cluster_params['contrail_external_vip'] + provision_params['nfs_glance_path'] = \ + cluster_params['nfs_glance_path'] + + provision_params['os_master'] = role_ips['openstack'][0] + match_dict = dict() + match_dict["id"] = role_ids['openstack'][0] + + servers = self._serverDb.get_server( + match_dict, None, True, + field_list = ["password"]) + provision_params['os_username'] = "root" + provision_params['os_password'] = servers[0]['password'] + + openstack_ids = role_ids['openstack'] + openstack_user_list = [] + openstack_password_list = [] + for openstack_id in openstack_ids: + match_dict["id"] = openstack_id + servers = self._serverDb.get_server( + match_dict, None, True, + field_list = ["password"]) + openstack_user_list.append("root") + openstack_password_list.append(servers[0]['password']) + provision_params['openstack_ip_list'] = role_ips['openstack'] + provision_params['openstack_user_list'] = openstack_user_list + provision_params['openstack_password_list'] = \ + openstack_password_list + + if 'gateway' in server and server['gateway']: + provision_params['server_gway'] = server['gateway'] + elif 'gateway' in cluster_params and cluster_params['gateway']: + provision_params['server_gway'] = cluster_params['gateway'] + else: + provision_params['server_gway'] = '' + + if 'kernel_upgrade' in server_params and server_params['kernel_upgrade']: + provision_params['kernel_upgrade'] = server_params['kernel_upgrade'] + elif 'kernel_upgrade' in cluster_params and cluster_params['kernel_upgrade']: + provision_params['kernel_upgrade'] = cluster_params['kernel_upgrade'] + else: + provision_params['kernel_upgrade'] = 'no' + + if 'kernel_version' in server_params and server_params['kernel_version']: + provision_params['kernel_version'] = 
server_params['kernel_version'] + elif 'kernel_version' in cluster_params and cluster_params['kernel_version']: + provision_params['kernel_version'] = cluster_params['kernel_version'] + else: + provision_params['kernel_version'] = '' + + + provision_params['haproxy'] = cluster_params['haproxy'] if 'setup_interface' in server_params.keys(): provision_params['setup_interface'] = \ @@ -2206,7 +2371,7 @@ def provision_server(self): else: provision_params['setup_interface'] = "No" - provision_params['haproxy'] = vns_params['haproxy'] + provision_params['haproxy'] = cluster_params['haproxy'] if 'execute_script' in server_params.keys(): provision_params['execute_script'] = server_params['execute_script'] else: @@ -2220,15 +2385,16 @@ def provision_server(self): provision_params['esx_vm_port_group'] = server_params['esx_vm_port_group'] provision_params['vm_deb'] = server_params['vm_deb'] if server_params.has_key('vm_deb') else "" provision_params['esx_vmdk'] = server_params['esx_vmdk'] - esx_servers = self._serverDb.get_server('server_id', server_params['esx_server'], - detail=True) + esx_servers = self._serverDb.get_server( + {'id' : server_params['esx_server']}, + detail=True) esx_server = esx_servers[0] - provision_params['esx_ip'] = esx_server['ip'] + provision_params['esx_ip'] = esx_server['ip_address'] provision_params['esx_username'] = "root" - provision_params['esx_passwd'] = esx_server['passwd'] + provision_params['esx_password'] = esx_server['password'] provision_params['esx_server'] = esx_server - provision_params['server_mac'] = server['mac'] - provision_params['passwd'] = server['passwd'] + provision_params['server_mac'] = server['mac_address'] + provision_params['password'] = server['password'] if 'datastore' in server_params.keys(): provision_params['datastore'] = server_params['datastore'] @@ -2244,55 +2410,62 @@ def provision_server(self): provision_params['esx_vmdk'] = "" provision_params['esx_ip'] = "" provision_params['esx_username'] = "" - 
provision_params['esx_passwd'] = "" + provision_params['esx_password'] = "" if interface_created: provision_params['setup_interface'] = "No" - if 'region_name' in vns_params.keys(): - provision_params['region_name'] = vns_params['region_name'] + if 'region_name' in cluster_params.keys(): + provision_params['region_name'] = cluster_params['region_name'] else: provision_params['region_name'] = "RegionOne" if 'execute_script' in server_params.keys(): provision_params['execute_script'] = server_params['execute_script'] else: provision_params['execute_script'] = "" - if 'ext_bgp' in vns_params.keys(): - provision_params['ext_bgp'] = vns_params['ext_bgp'] + if 'external_bgp' in cluster_params.keys(): + provision_params['external_bgp'] = cluster_params['external_bgp'] else: - provision_params['ext_bgp'] = "" + provision_params['external_bgp'] = "" # Storage role params + if 'subnet_mask' in server and server['subnet_mask']: + subnet_mask = server['subnet_mask'] + elif 'subnet_mask' in cluster_params and cluster_params['subnet_mask']: + subnet_mask = cluster_params['subnet_mask'] + else: subnet_mask = '' + provision_params['subnet-mask'] = subnet_mask provision_params['host_roles'] = eval(server['roles']) provision_params['storage_num_osd'] = total_osd - provision_params['storage_fsid'] = vns_params['storage_fsid'] - provision_params['storage_virsh_uuid'] = vns_params['storage_virsh_uuid'] - if len(role_servers['storage']): - if len(role_servers['storage-mgr']) == 0: + provision_params['storage_fsid'] = cluster_params['storage_fsid'] + provision_params['storage_virsh_uuid'] = cluster_params['storage_virsh_uuid'] + provision_params['num_storage_hosts'] = num_storage_hosts + if len(role_servers['storage-compute']): + if len(role_servers['storage-master']) == 0: msg = "Storage nodes can only be provisioned when there is also a Storage-Manager node" raise ServerMgrException(msg) - if 'storage_mon_secret' in vns_params.keys(): - if len(vns_params['storage_mon_secret']) == 40: - 
provision_params['storage_mon_secret'] = vns_params['storage_mon_secret'] + if 'storage_mon_secret' in cluster_params.keys(): + if len(cluster_params['storage_mon_secret']) == 40: + provision_params['storage_mon_secret'] = cluster_params['storage_mon_secret'] else: msg = "Storage Monitor Secret Key is the wrong length" raise ServerMgrException(msg) else: provision_params['storage_mon_secret'] = "" - if 'osd_bootstrap_key' in vns_params.keys(): - if len(vns_params['osd_bootstrap_key']) == 40: - provision_params['osd_bootstrap_key'] = vns_params['osd_bootstrap_key'] + if 'osd_bootstrap_key' in cluster_params.keys(): + if len(cluster_params['osd_bootstrap_key']) == 40: + provision_params['osd_bootstrap_key'] = cluster_params['osd_bootstrap_key'] else: msg = "OSD Bootstrap Key is the wrong length" raise ServerMgrException(msg) else: provision_params['osd_bootstrap_key'] = "" - if 'admin_key' in vns_params.keys(): - if len(vns_params['admin_key']) == 40: - provision_params['admin_key'] = vns_params['admin_key'] + if 'admin_key' in cluster_params.keys(): + if len(cluster_params['admin_key']) == 40: + provision_params['admin_key'] = cluster_params['admin_key'] else: msg = "Admin Key is the wrong length" raise ServerMgrException(msg) @@ -2303,25 +2476,44 @@ def provision_server(self): provision_params['storage_server_disks'].extend(server_params['disks']) storage_mon_host_ip_set = set() - for x in role_servers['storage']: - storage_mon_host_ip_set.add(x["ip"]) - for x in role_servers['storage-mgr']: - storage_mon_host_ip_set.add(x["ip"]) + for x in role_servers['storage-compute']: + storage_mon_host_ip_set.add(self._smgr_puppet.get_control_ip( provision_params, x["ip_address"]).strip('"')) + for x in role_servers['storage-master']: + storage_mon_host_ip_set.add(self._smgr_puppet.get_control_ip(provision_params, x["ip_address"]).strip('"')) provision_params['storage_monitor_hosts'] = list(storage_mon_host_ip_set) # Multiple Repo support if 'storage_repo_id' in 
server_params.keys(): - provision_params['storage_repo_id'] = server_params['storage_repo_id'] + images = self.get_image() + image_ids = dict() + for image in images['image']: + match_dict = dict() + match_dict["id"] = image['id'] + cur_image = self._serverDb.get_image(match_dict, None, detail=True) + if cur_image is not None: + image_ids[image['id']] = cur_image[0]['type'] + else: + msg = "No images found" + raise ServerMgrException(msg) + if server_params['storage_repo_id'] in image_ids: + if image_ids[server_params['storage_repo_id']] == 'contrail-storage-ubuntu-package': + provision_params['storage_repo_id'] = server_params['storage_repo_id'] + else: + msg = "Storage repo id specified doesn't match a contrail storage package" + raise ServerMgrException(msg) + else: + msg = "Storage repo id specified doesn't match any of the image ids" + raise ServerMgrException(msg) else: provision_params['storage_repo_id'] = "" # Storage manager restrictions - if len(role_servers['storage-mgr']): - if len(role_servers['storage-mgr']) > 1: - msg = "There can only be only one node with the role 'storage-mgr'" + if len(role_servers['storage-master']): + if len(role_servers['storage-master']) > 1: + msg = "There can only be only one node with the role 'storage-master'" raise ServerMgrException(msg) - elif len(role_servers['storage']) == 0: + elif len(role_servers['storage-compute']) == 0: msg = "Storage manager node needs Storage nodes to also be provisioned" raise ServerMgrException(msg) else: @@ -2357,7 +2549,7 @@ def _parse_args(self, args_str): Eg. 
python vnc_server_manager.py --config_file serverMgr.cfg --listen_ip_addr 127.0.0.1 --listen_port 8082 - --db_name vns_server_mgr.db + --database_name cluster_server_mgr.db --server_list myClusters.json ''' @@ -2372,19 +2564,19 @@ def _parse_args(self, args_str): args, remaining_argv = conf_parser.parse_known_args(args_str) serverMgrCfg = { - 'listen_ip_addr' : _WEB_HOST, - 'listen_port' : _WEB_PORT, - 'db_name' : _DEF_CFG_DB, - 'smgr_base_dir' : _DEF_SMGR_BASE_DIR, - 'html_root_dir' : _DEF_HTML_ROOT_DIR, - 'cobbler_ip' : _DEF_COBBLER_IP, - 'cobbler_port' : _DEF_COBBLER_PORT, - 'cobbler_user' : _DEF_COBBLER_USER, - 'cobbler_passwd' : _DEF_COBBLER_PASSWD, - 'power_user' : _DEF_POWER_USER, - 'power_pass' : _DEF_POWER_PASSWD, - 'power_type' : _DEF_POWER_TOOL, - 'puppet_dir' : _DEF_PUPPET_DIR + 'listen_ip_addr' : _WEB_HOST, + 'listen_port' : _WEB_PORT, + 'database_name' : _DEF_CFG_DB, + 'server_manager_base_dir' : _DEF_SMGR_BASE_DIR, + 'html_root_dir' : _DEF_HTML_ROOT_DIR, + 'cobbler_ip_address' : _DEF_COBBLER_IP, + 'cobbler_port' : _DEF_COBBLER_PORT, + 'cobbler_username' : _DEF_COBBLER_USERNAME, + 'cobbler_password' : _DEF_COBBLER_PASSWORD, + 'ipmi_username' : _DEF_IPMI_USERNAME, + 'ipmi_password' : _DEF_IPMI_PASSWORD, + 'ipmi_type' : _DEF_IPMI_TYPE, + 'puppet_dir' : _DEF_PUPPET_DIR } if args.config_file: @@ -2421,7 +2613,7 @@ def _parse_args(self, args_str): "-p", "--listen_port", help="Port to provide service on, default %s" % (_WEB_PORT)) parser.add_argument( - "-d", "--db_name", + "-d", "--database_name", help=( "Name where server DB is maintained, default %s" % (_DEF_CFG_DB))) @@ -2448,9 +2640,9 @@ def _unmount_iso(self, mount_path): # Private method to mount a given iso before calling cobbler functions. 
def _mount_and_copy_iso(self, full_image_name, copy_path, distro_name, - kernel_file, initrd_file): + kernel_file, initrd_file, image_type): try: - mount_path = self._args.smgr_base_dir + "mnt/" + mount_path = self._args.server_manager_base_dir + "mnt/" self._unmount_iso(mount_path) # Make directory where ISO will be mounted return_code = subprocess.call(["mkdir", "-p", mount_path]) @@ -2468,6 +2660,26 @@ def _mount_and_copy_iso(self, full_image_name, copy_path, distro_name, # Copy the files from mounted ISO. shutil.rmtree(copy_path, True) shutil.copytree(mount_path, copy_path, True) + # Temporary Bug Fix for Corrupt Packages.gz issue reported by boot loader + # during PXE booting if using Server Manager on Ubuntu + # Final permanent fix TBD + + if platform.dist()[0].lower() == 'ubuntu' and image_type == 'ubuntu': + packages_dir_path = str(copy_path + "/dists/precise/restricted/binary-amd64") + if os.path.exists(packages_dir_path): + cwd = os.getcwd() + os.chdir(packages_dir_path) + shutil.copyfile('Packages.gz', 'Packages_copy.gz') + return_code = subprocess.call(["gunzip", "Packages_copy.gz"]) + if (return_code != 0): + return return_code + file_size = os.stat(packages_dir_path + "/Packages_copy").st_size + if file_size == 0: + shutil.move('Packages_copy', 'Packages') + else: + shutil.rmtree('Packages_copy') + os.chdir(cwd) + # End Temporary Bug Fix # Need to change mode to kernel and initrd files to read for all. 
kernel_file_full_path = copy_path + kernel_file return_code = subprocess.call( @@ -2507,29 +2719,31 @@ def _power_cycle_servers( self._smgr_log.log(self._smgr_log.DEBUG, "Enable netboot") self._smgr_cobbler.enable_system_netboot( - server['server_id']) + server['id']) cmd = "puppet cert clean %s.%s" % ( - server['server_id'], server['domain']) - self._smgr_log.log(self._smgr_log.DEBUG, - cmd) - subprocess.call(cmd, shell=True) + server['id'], server['domain']) + ret_code = subprocess.call(cmd, shell=True) + self._smgr_log.log( + self._smgr_log.DEBUG, + cmd + "; ret_code = %d" %(ret_code)) # Remove manifest file for this server cmd = "rm -f /etc/puppet/manifests/%s.%s.pp" %( - server['server_id'], server['domain']) - self._smgr_log.log(self._smgr_log.DEBUG, - cmd) - - subprocess.call(cmd, shell=True) + server['id'], server['domain']) + ret_code = subprocess.call(cmd, shell=True) + self._smgr_log.log( + self._smgr_log.DEBUG, + cmd + "; ret_code = %d" %(ret_code)) # Remove entry for that server from site.pp cmd = "sed -i \"/%s.%s.pp/d\" /etc/puppet/manifests/site.pp" %( - server['server_id'], server['domain']) - self._smgr_log.log(self._smgr_log.DEBUG, - cmd) - subprocess.call(cmd, shell=True) + server['id'], server['domain']) + ret_code = subprocess.call(cmd, shell=True) + self._smgr_log.log( + self._smgr_log.DEBUG, + cmd + "; ret_code = %d" %(ret_code)) # end if - if server['power_address']: + if server['ipmi_address']: power_reboot_list.append( - server['server_id']) + server['id']) else: client = paramiko.SSHClient() client.set_missing_host_key_policy( @@ -2538,19 +2752,27 @@ def _power_cycle_servers( stdin, stdout, stderr = client.exec_command('reboot') # end else # Update Server table to update time. 
- update = {'server_id': server['server_id'], - 'update_time': strftime( + update = {'id': server['id'], + 'status' : 'restart_issued', + 'last_update': strftime( "%Y-%m-%d %H:%M:%S", gmtime())} self._serverDb.modify_server(update) - success_list.append(server['server_id']) + success_list.append(server['id']) + except subprocess.CalledProcessError as e: + msg = ("power_cycle_servers: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) + self._smgr_log.log(self._smgr_log.ERROR, + "Failed re-booting for server %s" % \ + (server['id'])) + failed_list.append(server['id']) except Exception as e: self._smgr_log.log(self._smgr_log.ERROR, repr(e)) - self._smgr_log.log(self._smgr_log.ERROR, "Failed re-booting for server %s" % \ - (server['server_id'])) - failed_list.append(server['server_id']) + self._smgr_log.log(self._smgr_log.ERROR, "Failed re-booting for server %s" % (server['id'])) + failed_list.append(server['id']) #end for self._smgr_cobbler.sync() if power_reboot_list: @@ -2574,10 +2796,10 @@ def _power_cycle_servers( # end _power_cycle_servers - def _encrypt_passwd(self, server_passwd): + def _encrypt_password(self, server_password): try: xyz = subprocess.Popen( - ["openssl", "passwd", "-1", "-noverify", server_passwd], + ["openssl", "passwd", "-1", "-noverify", server_password], stdout=subprocess.PIPE).communicate()[0] except: return None @@ -2586,32 +2808,33 @@ # Internal private call to upgrade server. This is called by REST # API update_server and upgrade_cluster def _do_reimage_server(self, base_image, - package_image_id, reimage_params): + package_image_id, reimage_parameters): try: # Profile name is based on image name.
- profile_name = base_image['image_id'] + profile_name = base_image['id'] # Setup system information in cobbler self._smgr_cobbler.create_system( - reimage_params['server_id'], profile_name, package_image_id, - reimage_params['server_mac'], reimage_params['server_ip'], - reimage_params['server_mask'], reimage_params['server_gway'], - reimage_params['server_domain'], reimage_params['server_ifname'], - reimage_params['server_passwd'], - reimage_params.get('server_license', ''), - reimage_params.get('esx_nicname', 'vmnic0'), - reimage_params.get('power_type',self._args.power_type), - reimage_params.get('power_user',self._args.power_user), - reimage_params.get('power_pass',self._args.power_pass), - reimage_params.get('power_address',''), - base_image, self._args.listen_ip_addr) + reimage_parameters['server_id'], profile_name, package_image_id, + reimage_parameters['server_mac'], reimage_parameters['server_ip'], + reimage_parameters['server_mask'], reimage_parameters['server_gateway'], + reimage_parameters['server_domain'], reimage_parameters['server_ifname'], + reimage_parameters['server_password'], + reimage_parameters.get('server_license', ''), + reimage_parameters.get('esx_nicname', 'vmnic0'), + reimage_parameters.get('ipmi_type',self._args.ipmi_type), + reimage_parameters.get('ipmi_username',self._args.ipmi_username), + reimage_parameters.get('ipmi_password',self._args.ipmi_password), + reimage_parameters.get('ipmi_address',''), + base_image, self._args.listen_ip_addr, + reimage_parameters.get('partition', '')) # Sync the above information #self._smgr_cobbler.sync() # Update Server table to add image name update = { - 'mac': reimage_params['server_mac'], - 'base_image_id': base_image['image_id'], + 'mac_address': reimage_parameters['server_mac'], + 'base_image_id': base_image['id'], 'package_image_id': package_image_id} self._serverDb.modify_server(update) @@ -2625,37 +2848,41 @@ def _do_reimage_server(self, base_image, # Internal private call to provision server. 
This is called by REST API # provision_server and provision_cluster - def _do_provision_server(self, provision_params): + def _do_provision_server(self, provision_parameters): try: # Now call puppet to provision the server. + cfg_path = "/etc/puppet/modules/contrail_%s/files/%s.cfg" % \ + (provision_parameters['package_image_id'], provision_parameters['server_id']) + rc = subprocess.check_call( + ["rm", "-f" , cfg_path]) self._smgr_puppet.provision_server( - provision_params) + provision_parameters) # Now kickstart agent run on the target - host_name = provision_params['server_id'] + "." + \ - provision_params.get('domain', '') - rc = subprocess.call( + host_name = provision_parameters['server_id'] + "." + \ + provision_parameters.get('domain', '') + rc = subprocess.check_call( ["puppet", "kick", "--host", host_name]) + # Log, return error if return code is non-null - TBD Abhay # TBD Update Server table to stamp provisioned time. # update = {'server_id':server_id, # 'image_id':image_id} # self._serverDb.modify_server(update) - + except subprocess.CalledProcessError as e: + msg = ("do_provision_server: error %d when executing" + "\"%s\"" %(e.returncode, e.cmd)) + self._smgr_log.log(self._smgr_log.ERROR, msg) except Exception as e: raise e # end _do_provision_server def _create_server_manager_config(self, config): try: - clusters = config.get("clusters", None) - if clusters: - for cluster in clusters: + cluster_list = config.get("cluster", None) + if cluster_list: + for cluster in cluster_list: self._serverDb.add_cluster(cluster) - vns_list = config.get("vns", None) - if vns_list: - for vns in vns_list: - self._serverDb.add_vns(vns) servers = config.get("servers", None) if servers: for server in servers: @@ -2675,7 +2902,7 @@ def main(args_str=None): server_port = vnc_server_mgr.get_server_port() server_mgr_pid = os.getpid() - pid_file = "/var/run/contrail_smgrd/contrail_smgrd.pid" + pid_file = "/var/run/contrail-server-manager/contrail-server-manager.pid" dir = 
os.path.dirname(pid_file) if not os.path.exists(dir): os.mkdir(dir) diff --git a/src/server_mgr_puppet.py b/src/server_mgr_puppet.py index 051bae28..aedd43cd 100644 --- a/src/server_mgr_puppet.py +++ b/src/server_mgr_puppet.py @@ -91,12 +91,79 @@ def get_control_ip(self, provision_params, mgmt_ip_str): intf_control = eval(provision_params['control_net'] [mgmt_ip]) for intf,values in intf_control.items(): if intf: - return '"' + str(IPNetwork(values['ip']).ip) + '"' + return '"' + str(IPNetwork(values['ip_address']).ip) + '"' else: return '"' + provision_params['server_ip'] + '"' return '"' + mgmt_ip + '"' # end get_control_ip + ## return 1.1.1.0/24 format network and mask + def get_control_network_mask(self, provision_params, mgmt_ip_str): + intf_control = {} + ##netaddr.IPNetwork(ip_cidr).network, netaddr.IPNetwork(ip_cidr).prefixlen + mgmt_ip = mgmt_ip_str.strip("\"") + if provision_params['control_net'] [mgmt_ip]: + intf_control = eval(provision_params['control_net'] [mgmt_ip]) + for intf,values in intf_control.items(): + if intf: + self._smgr_log.log(self._smgr_log.DEBUG, "ip_address : %s" % values['ip_address']) + return '"' + str(IPNetwork(values['ip_address']).network) + '/'+ str(IPNetwork(values['ip_address']).prefixlen) + '"' + else: + self._smgr_log.log(self._smgr_log.DEBUG, "server_ip : %s" % values['server_ip']) + return '"' + str(IPNetwork(provision_params['server_ip']).network) + '/'+ str(IPNetwork(provision_params['server_ip']).prefixlen) + '"' + #return '"' + provision_params['server_ip'] + '"' + ip_address_cidr = mgmt_ip + '/' + provision_params['subnet-mask'] + return '"' + str(IPNetwork(ip_address_cidr).network) + '/'+ str(IPNetwork(ip_address_cidr).prefixlen) + '"' + #return '"' + mgmt_ip + '"' + # end get_control_network_mask + + def _update_kernel(self, provision_params): + # Get all the parameters needed to send to puppet manifest. 
+ if 'kernel_upgrade' in provision_params and \ + provision_params['kernel_upgrade'] == "yes" and \ + 'kernel_version' in provision_params and \ + provision_params['kernel_version'] != '' : + before_param = \ + "Contrail_%s::Contrail_common::Contrail_common[\"contrail_common\"]" %( + provision_params['puppet_manifest_version']) + data = ''' # Upgrade the kernel. + contrail_%s::contrail_common::upgrade-kernel{upgrade_kernel: + contrail_kernel_version => "%s", + before => %s + }\n\n''' % (provision_params['puppet_manifest_version'], + provision_params['kernel_version'], + before_param) + return data + else: + return '' + # end _update_kernel + + def _update_provision_start(self, provision_params): + # Get all the parameters needed to send to puppet manifest. + before_param = \ + "Contrail_%s::Contrail_common::Contrail-setup-repo[\"contrail_repo\"]" %( + provision_params['puppet_manifest_version']) + data = ''' # Create repository config on target. + contrail_%s::contrail_common::report_status{provision_started: + state => "%s", + before => %s + }\n\n''' % (provision_params['puppet_manifest_version'], + "provision_started" ,before_param) + return data + # end _update_provision_start + + def _update_provision_complete(self, provision_params, require_param): + # Get all the parameters needed to send to puppet manifest. + data = ''' # Update the state of server that provision is complete + contrail_%s::contrail_common::report_status{provision_completed: + state => "%s", + require => %s + }\n\n''' % (provision_params['puppet_manifest_version'], + "provision_completed" , require_param) + return data + # end _update_provision_complete + + def _repository_config(self, provision_params): # Get all the parameters needed to send to puppet manifest. 
before_param = "Contrail_%s::Contrail_common::Contrail-install-repo[\"install_repo\"]" % ( @@ -110,8 +177,18 @@ def _repository_config(self, provision_params): provision_params['package_image_id'], provision_params["server_mgr_ip"], before_param) - before_param = "Contrail_%s::Contrail_common::Contrail_common[\"contrail_common\"]" % \ + if 'kernel_upgrade' in provision_params and \ + provision_params['kernel_upgrade'] == "yes" and \ + 'kernel_version' in provision_params and \ + provision_params['kernel_version'] != '' : + + before_param = "Contrail_%s::Contrail_common::Upgrade-kernel[\"upgrade_kernel\"]" % \ (provision_params['puppet_manifest_version']) + else: + before_param = \ + "Contrail_%s::Contrail_common::Contrail_common[\"contrail_common\"]" %( + provision_params['puppet_manifest_version']) + data += ''' # Install repo on target. contrail_%s::contrail_common::contrail-install-repo{install_repo: @@ -121,6 +198,18 @@ def _repository_config(self, provision_params): provision_params['package_type'], before_param) + if 'storage-compute' in provision_params['host_roles'] or 'storage-master' in provision_params['host_roles']: + # storage role present: emit storage repo setup below + data += ''' # Install storage repo on target.
+ contrail_%s::contrail_common::contrail-setup-repo{contrail_storage_repo: + contrail_repo_name => "%s", + contrail_server_mgr_ip => "%s", + before => %s + }\n\n''' % (provision_params['puppet_manifest_version'], + provision_params['storage_repo_id'], + provision_params["server_mgr_ip"], before_param) + + return data # end _repository_config @@ -147,8 +236,8 @@ def create_interface(self, provision_params, last_rest_added=None): bond_opts = "" if intf in intf_bonds.keys(): bond = intf_bonds[intf] - members = bond['member'] - bond_opts = bond['bond_options'] + members = bond['member_interfaces'] + bond_opts = bond['options'] require_cmd = "Contrail_%s::Contrail_common::Contrail-setup-interface[\"%s\"]" %( provision_params['puppet_manifest_version'], intf) require_list.append(require_cmd) @@ -161,7 +250,7 @@ def create_interface(self, provision_params, last_rest_added=None): contrail_gw => "%s" }\n\n''' % (provision_params['puppet_manifest_version'], intf, intf, members , bond_opts, - values['ip'], values['gw']) + values['ip_address'], values['gateway']) if provision_params['intf_data']: @@ -171,8 +260,8 @@ def create_interface(self, provision_params, last_rest_added=None): bond_opts = "" if intf in intf_bonds.keys(): bond = intf_bonds[intf] - members = bond['member'] - bond_opts = bond['bond_options'] + members = bond['member_interfaces'] + bond_opts = bond['options'] require_cmd = "Contrail_%s::Contrail_common::Contrail-setup-interface[\"%s\"]" %( provision_params['puppet_manifest_version'], intf) require_list.append(require_cmd) @@ -185,7 +274,7 @@ def create_interface(self, provision_params, last_rest_added=None): contrail_gw => "%s" }\n\n''' % (provision_params['puppet_manifest_version'], intf, intf, members , bond_opts, - values['ip'], values['gw']) + values['ip_address'], values['gateway']) data_first = ''' # Create repository config on target. 
contrail_%s::contrail_common::contrail-setup-repo{contrail_repo: @@ -236,36 +325,30 @@ def puppet_add_database_role(self, provision_params, last_res_added): database_server = provision_params['roles']['database'] database_ip_control = self.get_control_ip( provision_params, provision_params['server_ip']) + database_ip_control_list=[] + for item in database_server: + database_ip_control_list.append(self.get_control_ip(provision_params,str(item))) + config_server_control = self.get_control_ip( provision_params, config_server) - cassandra_seeds = ["\"%s\""%(x) for x in \ - provision_params['roles']['database']] - if len(cassandra_seeds) > 2: - cassandra_seeds_control = [ - self.get_control_ip( - provision_params,str(cassandra_seeds[0])), - self.get_control_ip( - provision_params,str(cassandra_seeds[1]))] - else: - cassandra_seeds_control = [ - self.get_control_ip( - provision_params,str(cassandra_seeds[0]))] - - config_servers = provision_params['roles']['config'] if 'zookeeper' in provision_params['roles']: zk_servers = provision_params['roles']['zookeeper'] else: zk_servers = [] db_ip_list = ["\"%s\""%(x) for x in database_server] zoo_ip_list = ["\"%s\""%(x) for x in zk_servers] - #zk_ip_list = cassandra_seeds_control + zoo_ip_list zk_ip_list_control=[] - #for itr in zk_ip_list: - # zk_ip_list_control.append(self.get_control_ip(provision_params,str(itr))) - zk_ip_list_control= cassandra_seeds_control - contrail_cfgm_index = database_server.index( + contrail_database_index = database_server.index( provision_params["server_ip"])+1 + #####- + cassandra_seeds = ["\"%s\""%(x) for x in \ + provision_params['roles']['config']] + cassandra_seeds_control_list=[] + for item in cassandra_seeds: + cassandra_seeds_control_list.append(self.get_control_ip(provision_params,str(item))) + #####- + # Build Params items if self._params_dict.get( 'contrail_database_ip', None) is None: @@ -278,11 +361,11 @@ def puppet_add_database_role(self, provision_params, last_res_added): if 
self._params_dict.get( 'contrail_database_initial_token', None) is None: self._params_dict['contrail_database_initial_token'] = ( - "\"%s\"" %(provision_params["db_initial_token"])) + "\"%s\"" %(provision_params["database_token"])) if self._params_dict.get( 'contrail_cassandra_seeds', None) is None: self._params_dict['contrail_cassandra_seeds'] = ( - "[%s]" %(','.join(cassandra_seeds_control))) + "[%s]" %(','.join(cassandra_seeds_control_list))) if self._params_dict.get( 'system_name', None) is None: self._params_dict['system_name'] = ( @@ -294,11 +377,11 @@ def puppet_add_database_role(self, provision_params, last_res_added): if self._params_dict.get( 'contrail_zookeeper_ip_list', None) is None: self._params_dict['contrail_zookeeper_ip_list'] = ( - "[%s]" %(','.join(cassandra_seeds_control))) + "[%s]" %(','.join(database_ip_control_list))) if self._params_dict.get( - 'contrail_cfgm_index', None) is None: - self._params_dict['contrail_cfgm_index'] = ( - "\"%s\"" %(contrail_cfgm_index)) + 'contrail_database_index', None) is None: + self._params_dict['contrail_database_index'] = ( + "\"%s\"" %(contrail_database_index)) # Build resource items data += ''' # contrail-database role. 
contrail_%s::contrail_database::contrail_database{contrail_database: @@ -335,10 +418,24 @@ def puppet_add_openstack_role(self, provision_params, last_res_added): contrail_openstack_mgmt_ip_control=self.get_control_ip(provision_params,contrail_openstack_mgmt_ip) config_server_control=self.get_control_ip(provision_params,config_server) compute_server_control=self.get_control_ip(provision_params,compute_server) - + + openstack_ip_list = ['"'+ str(host) + '"' for host in provision_params['roles']['openstack'] ] + openstack_ip_list_wsrep = [str(host) + ':4567' for host in provision_params['roles']['openstack'] ] + + config_host_list = ['"'+ str(host) + '"' for host in \ + provision_params['role_ids']['config'] ] + openstack_ip_list_control = [] + for openstack_ip in openstack_ip_list: + openstack_ip_list_control.append(self.get_control_ip(provision_params, str(openstack_ip))) + + compute_host_list = ['"'+ str(host) + '"' for host in \ + provision_params['role_ids']['compute'] ] + config_servers_names = provision_params['role_ids']['config'] - # Keeping openstack index hardcoded untill ha is implemented - openstack_index="1" + # Keeping openstack index hardcoded untill ha is implemented + openstack_index = provision_params['roles'] \ + ['openstack'].index(provision_params["server_ip"]) + 1 + # openstack_index="1" rabbit_user_list=[] for cfgm_name in config_servers_names: rabbit_user_list.append('rabbit@'+str(cfgm_name)) @@ -348,7 +445,11 @@ def puppet_add_openstack_role(self, provision_params, last_res_added): amqp_server = provision_params['roles']['config'][0] amqp_server_control=self.get_control_ip(provision_params,amqp_server) #End here + #TODO Check for contrail_internal_vip andtrun this into a method + if provision_params["internal_vip"] != "": + amqp_server_control = provision_params["internal_vip"] + # Build Params items if self._params_dict.get( 'contrail_openstack_ip', None) is None: @@ -376,7 +477,7 @@ def puppet_add_openstack_role(self, provision_params, 
last_res_added): if self._params_dict.get( 'contrail_ks_admin_passwd', None) is None: self._params_dict['contrail_ks_admin_passwd'] = ( - "\"%s\"" %(provision_params["ks_passwd"])) + "\"%s\"" %(provision_params["keystone_password"])) if self._params_dict.get( 'contrail_haproxy', None) is None: self._params_dict['contrail_haproxy'] = ( @@ -388,7 +489,7 @@ def puppet_add_openstack_role(self, provision_params, last_res_added): if self._params_dict.get( 'contrail_openstack_index', None) is None: self._params_dict['contrail_openstack_index'] = ( - "\"%s\"" %(openstack_index.replace('"', ''))) + "\"%s\"" %(openstack_index)) if self._params_dict.get( 'contrail_rabbit_user', None) is None: self._params_dict['contrail_rabbit_user'] = ( @@ -397,7 +498,70 @@ def puppet_add_openstack_role(self, provision_params, last_res_added): 'contrail_cfgm_number', None) is None: self._params_dict['contrail_cfgm_number'] = ( "\"%s\"" %(len(config_servers_names))) - + if self._params_dict.get( + 'internal_vip', None) is None: + self._params_dict['internal_vip'] = ( + "\"%s\"" %(provision_params["internal_vip"])) + if self._params_dict.get( + 'external_vip', None) is None: + self._params_dict['external_vip'] = ( + "\"%s\"" %(provision_params["external_vip"])) + if self._params_dict.get( + 'nfs_server', None) is None: + self._params_dict['nfs_server'] = ( + "\"%s\"" %(provision_params["nfs_server"])) + if self._params_dict.get( + 'nfs_glance_path', None) is None: + self._params_dict['nfs_glance_path'] = ("\"%s\"" %(provision_params["nfs_glance_path"])) + if self._params_dict.get( + 'openstack_ip_list_control', None) is None: + self._params_dict['openstack_ip_list_control'] = ( + "[%s]" %(','.join(openstack_ip_list_control))) + if self._params_dict.get( + 'openstack_ip_list_wsrep', None) is None: + self._params_dict['openstack_ip_list_wsrep'] = ( + "\"%s\"" % (','.join(openstack_ip_list_wsrep))) + if self._params_dict.get( + 'compute_host_list', None) is None: + 
self._params_dict['compute_host_list'] = ( + "[%s]" %(','.join(compute_host_list))) + if self._params_dict.get( + 'openstack_ip_list', None) is None: + self._params_dict['openstack_ip_list'] = ( + "[%s]" %(','.join(openstack_ip_list))) + if self._params_dict.get( + 'openstack_user_list', None) is None: + self._params_dict['openstack_user_list'] = ( + "[%s]" %(','.join(provision_params['openstack_user_list']))) + if self._params_dict.get( + 'openstack_password_list', None) is None: + self._params_dict['openstack_password_list'] = ( + "[%s]" %(','.join(provision_params['openstack_password_list']))) + if self._params_dict.get( + 'config_host_list', None) is None: + self._params_dict['config_host_list'] = ( + "[%s]" %(','.join(config_host_list))) + if self._params_dict.get( + 'openstack_num_nodes', None) is None: + self._params_dict['openstack_num_nodes'] = ( + "%s" %(len(openstack_ip_list))) + #Build openstack master and username and passs + if self._params_dict.get( + 'os_username', None) is None: + self._params_dict['os_username'] = ( + "\"%s\"" %(provision_params["os_username"])) + if self._params_dict.get( + 'os_password', None) is None: + self._params_dict['os_password'] = ( + "\"%s\"" %(provision_params["os_password"])) + if self._params_dict.get( + 'os_master', None) is None: + self._params_dict['os_master'] = ( + "\"%s\"" %(provision_params["os_master"])) + if self._params_dict.get( + 'contrail_openstack_root_passwd', None) is None: + self._params_dict['contrail_openstack_root_passwd'] = ( + "\"%s\"" %(provision_params["openstack_passwd"])) # Build resource items data += ''' # contrail-openstack role. 
contrail_%s::contrail_openstack::contrail_openstack{contrail_openstack: @@ -408,10 +572,275 @@ def puppet_add_openstack_role(self, provision_params, last_res_added): if provision_params["haproxy"] == "enable": self.create_openstack_ha_proxy(provision_params) - + if provision_params["internal_vip"] != "": + self.create_openstack_ha_proxy_new(provision_params) return data # end puppet_add_openstack_role + #Function to create haproxy cfg for openstack nodes + def create_openstack_ha_proxy_new(self, provision_params): + + keystone_server_lines = '' + keystone_admin_server_lines = '' + glance_server_lines = '' + cinder_server_lines = '' + nova_api_server_lines = '' + nova_meta_server_lines = '' + memcached_server_lines = '' + rabbitmq_server_lines = '' + mysql_server_lines = '' + space = ' ' * (3) + + haproxy_config = '' + smgr_dir = staging_dir = "/etc/puppet/modules/contrail_"+ provision_params['puppet_manifest_version'] + "/files/" + template = string.Template("""#contrail-openstack-marker-start + listen contrail-openstack-stats :5936 + mode http + stats enable + stats uri / + stats auth $__contrail_hap_user__:$__contrail_hap_passwd__ + + frontend openstack-keystone *:5000 + default_backend keystone-backend + + backend keystone-backend + option tcpka + option nolinger + srvtimeout 24h + balance roundrobin + $__keystone_backend_servers__ + + frontend openstack-keystone-admin *:35357 + default_backend keystone-admin-backend + + backend keystone-admin-backend + option tcpka + option nolinger + srvtimeout 24h + balance roundrobin + $__keystone_admin_backend_servers__ + + frontend openstack-glance *:9292 + default_backend glance-backend + + backend glance-backend + option tcpka + option nolinger + srvtimeout 24h + balance roundrobin + $__glance_backend_servers__ + + frontend openstack-cinder *:8776 + default_backend cinder-backend + + backend cinder-backend + option tcpka + option nolinger + srvtimeout 24h + balance roundrobin + $__cinder_backend_servers__ + + frontend 
openstack-nova-api *:8774 + default_backend nova-api-backend + + backend nova-api-backend + option tcpka + option nolinger + srvtimeout 24h + balance roundrobin + $__nova_api_backend_servers__ + + frontend openstack-nova-meta *:8775 + default_backend nova-meta-backend + + backend nova-meta-backend + option tcpka + option nolinger + srvtimeout 24h + balance roundrobin + $__nova_meta_backend_servers__ + + + listen memcached 0.0.0.0:11222 + mode tcp + balance roundrobin + option tcplog + maxconn 10000 + balance roundrobin + option tcpka + option nolinger + timeout connect 5s + timeout client 48h + timeout server 48h + $__memcached_servers__ + + listen rabbitmq 0.0.0.0:5673 + mode tcp + maxconn 10000 + balance roundrobin + option tcpka + option redispatch + timeout client 48h + timeout server 48h + $__rabbitmq_servers__ + + listen mysql 0.0.0.0:33306 + mode tcp + balance roundrobin + option tcpka + option nolinger + option redispatch + maxconn 2000 + contimeout 5s + clitimeout 24h + srvtimeout 24h + option mysql-check user root + $__mysql_servers__ + + #contrail-openstack-marker-end + """) + + openstack_ip_list = provision_params['roles']['openstack'] + + for openstack_ip in openstack_ip_list: + server_index = openstack_ip_list.index( + provision_params["server_ip"]) + 1 + + host_ip = self.get_control_ip(provision_params, openstack_ip) + host_ip = host_ip.strip('"') + space_end = ' ' * (8) + + keystone_server_lines +=\ + '%s server %s %s:6000 check inter 2000 rise 2 fall 3\n%s'\ + % (space, host_ip, host_ip, space_end) + keystone_admin_server_lines +=\ + '%s server %s %s:35358 check inter 2000 rise 2 fall 3\n%s'\ + % (space, host_ip, host_ip, space_end) + glance_server_lines +=\ + '%s server %s %s:9393 check inter 2000 rise 2 fall 3\n%s'\ + % (space, host_ip, host_ip, space_end) + cinder_server_lines +=\ + '%s server %s %s:9776 check inter 2000 rise 2 fall 3\n%s'\ + % (space, host_ip, host_ip, space_end) + nova_api_server_lines +=\ + '%s server %s %s:9774 check inter 
2000 rise 2 fall 3\n%s'\ + % (space, host_ip, host_ip, space_end) + nova_meta_server_lines +=\ + '%s server %s %s:9775 check inter 2000 rise 2 fall 3\n%s'\ + % (space, host_ip, host_ip, space_end) + if server_index <= 2: + memcached_server_lines +=\ + '%s server repcache%s %s:11211 check inter 2000 rise 2 fall 3\n'\ + % (space, server_index, host_ip) + rabbitmq_server_lines +=\ + '%s server rabbit%s %s:5672 check inter 2000 rise 2 fall 3 weight 1 maxconn 500\n'\ + % (space, server_index, host_ip) + mysql_server_lines +=\ + '%s server mysql%s %s:3306 weight 1\n'\ + % (space, server_index, host_ip) + + for openstack_node in openstack_ip_list: + haproxy_config = template.safe_substitute({ + '__keystone_backend_servers__' : keystone_server_lines, + '__keystone_admin_backend_servers__' : keystone_admin_server_lines, + '__glance_backend_servers__' : glance_server_lines, + '__cinder_backend_servers__' : cinder_server_lines, + '__nova_api_backend_servers__' : nova_api_server_lines, + '__nova_meta_backend_servers__' : nova_meta_server_lines, + '__memcached_servers__' : memcached_server_lines, + '__rabbitmq_servers__' : rabbitmq_server_lines, + '__mysql_servers__' : mysql_server_lines, + '__contrail_hap_user__': 'haproxy', + '__contrail_hap_passwd__': 'contrail123', + }) + + """ + for host_string in args: + with settings(host_string=host_string): + # chop old settings including pesky default from pkg... 
+ tmp_fname = "/tmp/haproxy-%s-config" % (host_string) + get("/etc/haproxy/haproxy.cfg", tmp_fname) + with settings(warn_only=True): + local("sed -i -e '/^#contrail-openstack-marker-start/,/^#contrail-openstack-marker-end/d' %s" % (tmp_fname)) + local("sed -i -e 's/*:5000/*:5001/' %s" % (tmp_fname)) + local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname)) + local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname)) + # ...generate new ones + cfg_file = open(tmp_fname, 'a') + cfg_file.write(haproxy_config) + cfg_file.close() + put(tmp_fname, "/etc/haproxy/haproxy.cfg") + local("rm %s" %(tmp_fname)) + # haproxy enable + with settings(host_string=host_string, warn_only=True): + run("chkconfig haproxy on") + run("service supervisor-openstack stop") + enable_haproxy() + run("service haproxy restart") + #Change the keystone admin/public port + run("openstack-config --set /etc/keystone/keystone.conf DEFAULT public_port 6000") + run("openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_port 35358") + + """ + ha_proxy_cfg = staging_dir + provision_params['server_id'] + ".cfg" + + if not os.path.isfile(ha_proxy_cfg): + shutil.copy2(smgr_dir + "haproxy.cfg", ha_proxy_cfg) + + cfg_file = open(ha_proxy_cfg, 'a') + cfg_file.write(haproxy_config) + cfg_file.close() + + def create_config_ha_proxy_for_collector(self, provision_params): + collector_haproxy_tmpl = string.Template("""#contrail-collector-marker-start +listen contrail-collector-stats :5938 + mode http + stats enable + stats uri / + stats auth $__contrail_hap_user__:$__contrail_hap_passwd__ + +frontend contrail-analytics-api *:8081 + default_backend contrail-analytics-api + +backend contrail-analytics-api + option nolinger + balance roundrobin +$__contrail_analytics_api_backend_servers__ + +#contrail-collector-marker-end +""") + + + + smgr_dir = staging_dir = "/etc/puppet/modules/contrail_"+ provision_params['puppet_manifest_version'] + "/files/" + 
contrail_analytics_api_server_lines = '' + space = ' ' * 3 + collector_role_list = provision_params['roles']['collector'] + collector_ip_list = provision_params['roles']['collector'] + + for collector_host in collector_role_list: + server_index = collector_ip_list.index( + collector_host) + 1 + + host_ip = collector_host + host_ip_control = self.get_control_ip(provision_params,host_ip).strip('"') + contrail_analytics_api_server_lines +=\ + '%s server %s %s:9081 check inter 2000 rise 2 fall 3\n'\ + % (space, host_ip, host_ip) + + haproxy_config = collector_haproxy_tmpl.safe_substitute({ + '__contrail_analytics_api_backend_servers__' : + contrail_analytics_api_server_lines, + '__contrail_hap_user__': 'haproxy', + '__contrail_hap_passwd__': 'contrail123', + }) + ha_proxy_cfg = staging_dir + provision_params['server_id'] + ".cfg" + if not os.path.isfile(ha_proxy_cfg): + shutil.copy2(smgr_dir + "haproxy.cfg", ha_proxy_cfg) + + cfg_file = open(ha_proxy_cfg, 'a') + cfg_file.write(haproxy_config) + cfg_file.close() def create_config_ha_proxy(self, provision_params): @@ -488,7 +917,9 @@ def create_config_ha_proxy(self, provision_params): }) ha_proxy_cfg = staging_dir + provision_params['server_id'] + ".cfg" - shutil.copy2(smgr_dir + "haproxy.cfg", ha_proxy_cfg) + if not os.path.isfile(ha_proxy_cfg): + shutil.copy2(smgr_dir + "haproxy.cfg", ha_proxy_cfg) + cfg_file = open(ha_proxy_cfg, 'a') cfg_file.write(haproxy_config) cfg_file.close() @@ -611,15 +1042,21 @@ def puppet_add_config_role(self, provision_params, last_res_added): config_server_control=self.get_control_ip(provision_params,config_server) config_servers_names = provision_params['role_ids']['config'] # Keeping openstack index hardcoded untill ha is implemented - openstack_index="1" +# openstack_index="1" +# openstack_index = provision_params['roles'] \ +# ['openstack'].index(provision_params["server_ip"]) + 1 + rabbit_user_list=[] for cfgm_name in config_servers_names: 
rabbit_user_list.append('rabbit@'+str(cfgm_name)) rabbit_user_list=str(rabbit_user_list) rabbit_user_list=rabbit_user_list.replace(" ","") - #Chhnadak + #Get the amqp ip based on ha policy amqp_server = provision_params['roles']['config'][0] amqp_server_control=self.get_control_ip(provision_params,amqp_server) + #TODO Check for contrail_internal_vip andtrun this into a method + if provision_params["internal_vip"] != "": + amqp_server_control = provision_params["internal_vip"] #End here # Build Params items @@ -638,7 +1075,7 @@ def puppet_add_config_role(self, provision_params, last_res_added): if self._params_dict.get( 'contrail_use_certs', None) is None: self._params_dict['contrail_use_certs'] = ( - "\"%s\"" %(provision_params["use_certs"])) + "\"%s\"" %(provision_params["use_certificates"])) if self._params_dict.get( 'contrail_multi_tenancy', None) is None: self._params_dict['contrail_multi_tenancy'] = ( @@ -668,19 +1105,19 @@ def puppet_add_config_role(self, provision_params, last_res_added): if self._params_dict.get( 'contrail_ks_admin_user', None) is None: self._params_dict['contrail_ks_admin_user'] = ( - "\"%s\"" %(provision_params["ks_user"])) + "\"%s\"" %(provision_params["keystone_username"])) if self._params_dict.get( 'contrail_ks_admin_passwd', None) is None: self._params_dict['contrail_ks_admin_passwd'] = ( - "\"%s\"" %(provision_params["ks_passwd"])) + "\"%s\"" %(provision_params["keystone_password"])) if self._params_dict.get( 'contrail_ks_admin_tenant', None) is None: self._params_dict['contrail_ks_admin_tenant'] = ( - "\"%s\"" %(provision_params["ks_tenant"])) + "\"%s\"" %(provision_params["keystone_tenant"])) if self._params_dict.get( - 'contrail_openstack_root_passwd', None) is None: - self._params_dict['contrail_openstack_root_passwd'] = ( - "\"%s\"" %(provision_params["openstack_passwd"])) + 'root_password', None) is None: + self._params_dict['root_password'] = ( + "\"%s\"" %(provision_params["password"])) if self._params_dict.get( 
'contrail_cassandra_ip_list', None) is None: self._params_dict['contrail_cassandra_ip_list'] = ( @@ -742,19 +1179,19 @@ def puppet_add_config_role(self, provision_params, last_res_added): if self._params_dict.get( 'contrail_encap_priority', None) is None: self._params_dict['contrail_encap_priority'] = ( - "\"%s\"" %(provision_params['encap_priority'])) + "\"%s\"" %(provision_params['encapsulation_priority'])) if self._params_dict.get( 'contrail_bgp_params', None) is None: self._params_dict['contrail_bgp_params'] = ( - "\"%s\"" %(provision_params['ext_bgp'])) + "\"%s\"" %(provision_params['external_bgp'])) if self._params_dict.get( 'contrail_amqp_server_ip', None) is None: self._params_dict['contrail_amqp_server_ip'] = ( "\"%s\"" %(amqp_server_control.replace('"', ''))) - if self._params_dict.get( - 'contrail_openstack_index', None) is None: - self._params_dict['contrail_openstack_index'] = ( - "\"%s\"" %(openstack_index.replace('"', ''))) +# if self._params_dict.get( +# 'contrail_openstack_index', None) is None: +# self._params_dict['contrail_openstack_index'] = ( +# "\"%s\"" %(openstack_index)) if self._params_dict.get( 'contrail_rabbit_user', None) is None: self._params_dict['contrail_rabbit_user'] = ( @@ -774,7 +1211,9 @@ def puppet_add_config_role(self, provision_params, last_res_added): last_res_added) #add Ha Proxy self.create_config_ha_proxy(provision_params) - + #add Ha proxy for collector + if provision_params['internal_vip'] != '': + self.create_config_ha_proxy_for_collector(provision_params) return data # end puppet_add_config_role @@ -1226,11 +1665,11 @@ def puppet_add_compute_role(self, provision_params, last_res_added): vm_params['uplink_vswitch'] = provision_params['esx_fab_vswitch'] vm_params['server'] = provision_params['esx_ip'] vm_params['username'] = provision_params['esx_username'] - vm_params['passwd'] = provision_params['esx_passwd'] + vm_params['password'] = provision_params['esx_password'] vm_params['thindisk'] = 
provision_params['esx_vmdk'] vm_params['smgr_ip'] = provision_params['smgr_ip']; vm_params['domain'] = provision_params['domain'] - vm_params['vm_passwd'] = provision_params['passwd'] + vm_params['vm_password'] = provision_params['password'] vm_params['vm_server'] = provision_params['server_id'] vm_params['vm_deb'] = provision_params['vm_deb'] out = ContrailVM(vm_params) @@ -1258,31 +1697,35 @@ def puppet_add_compute_role(self, provision_params, last_res_added): openstack_server = provision_params['roles']['openstack'][0] openstack_server_control= self.get_control_ip(provision_params,openstack_server) - if (provision_params['openstack_mgmt_ip'] == ''): - contrail_openstack_mgmt_ip = provision_params['roles']['openstack'][0] - else: - contrail_openstack_mgmt_ip = provision_params['openstack_mgmt_ip'] + contrail_openstack_mgmt_ip = provision_params['roles']['openstack'][0] contrail_openstack_mgmt_ip_control= self.get_control_ip(provision_params,contrail_openstack_mgmt_ip) server_ip_control= self.get_control_ip(provision_params,provision_params["server_ip"]) - #TODO Check - if provision_params["compute_non_mgmt_ip"] == "": - provision_params["compute_non_mgmt_ip"] = provision_params["server_ip"] - if provision_params["compute_non_mgmt_gway"] == "": - provision_params["compute_non_mgmt_gway"] = provision_params['server_gway'] + provision_params["compute_non_mgmt_ip"] = provision_params["server_ip"] + provision_params["compute_non_mgmt_gway"] = provision_params['server_gway'] + first_compute = "no" + + if provision_params['server_ip'] == \ + provision_params['roles']['compute'][0]: + first_compute = "yes" + + if provision_params['intf_control']: intf_control = eval(provision_params['intf_control']) for intf,values in intf_control.items(): - non_mgmt_ip= values['ip'].split("/")[0] - non_mgmt_gw= values['gw'] + non_mgmt_ip= values['ip_address'].split("/")[0] + non_mgmt_gw= values['gateway'] else: non_mgmt_ip = provision_params["compute_non_mgmt_ip"] non_mgmt_gw = 
provision_params["compute_non_mgmt_gway"] # Keeping openstack index hardcoded untill ha is implemented - openstack_index="1" - #Chhnadak + amqp_server = provision_params['roles']['config'][0] amqp_server_control=self.get_control_ip(provision_params,amqp_server) + #TODO Check for contrail_internal_vip andtrun this into a method + if provision_params["internal_vip"] != "": + amqp_server_control = provision_params["internal_vip"] + #End here # if provision_params['haproxy'] == 'enable': @@ -1323,10 +1766,12 @@ def puppet_add_compute_role(self, provision_params, last_res_added): 'contrail_physical_interface', None) is None: self._params_dict['contrail_physical_interface'] = ( "\"%s\"" %(provision_params["phy_interface"])) + # Restrict the numbe of control nodes to two for agent + contrail_num_controls = 2 if self._params_dict.get( 'contrail_num_controls', None) is None: self._params_dict['contrail_num_controls'] = ( - "\"%s\"" %(len(control_servers))) + "\"%s\"" %(contrail_num_controls)) if self._params_dict.get( 'contrail_non_mgmt_ip', None) is None: self._params_dict['contrail_non_mgmt_ip'] = ( @@ -1338,15 +1783,15 @@ def puppet_add_compute_role(self, provision_params, last_res_added): if self._params_dict.get( 'contrail_ks_admin_user', None) is None: self._params_dict['contrail_ks_admin_user'] = ( - "\"%s\"" %(provision_params["ks_user"])) + "\"%s\"" %(provision_params["keystone_username"])) if self._params_dict.get( 'contrail_ks_admin_passwd', None) is None: self._params_dict['contrail_ks_admin_passwd'] = ( - "\"%s\"" %(provision_params["ks_passwd"])) + "\"%s\"" %(provision_params["keystone_password"])) if self._params_dict.get( 'contrail_ks_admin_tenant', None) is None: self._params_dict['contrail_ks_admin_tenant'] = ( - "\"%s\"" %(provision_params["ks_tenant"])) + "\"%s\"" %(provision_params["keystone_tenant"])) if self._params_dict.get( 'contrail_haproxy', None) is None: self._params_dict['contrail_haproxy'] = ( @@ -1362,7 +1807,7 @@ def 
puppet_add_compute_role(self, provision_params, last_res_added): if self._params_dict.get( 'contrail_vm_passwd', None) is None: self._params_dict['contrail_vm_passwd'] = ( - "\"%s\"" %(provision_params["esx_passwd"])) + "\"%s\"" %(provision_params["esx_password"])) if self._params_dict.get( 'contrail_vswitch', None) is None: self._params_dict['contrail_vswitch'] = ( @@ -1372,9 +1817,10 @@ def puppet_add_compute_role(self, provision_params, last_res_added): self._params_dict['contrail_amqp_server_ip'] = ( "\"%s\"" %(amqp_server_control.replace('"', ''))) if self._params_dict.get( - 'contrail_openstack_index', None) is None: - self._params_dict['contrail_openstack_index'] = ( - "\"%s\"" %(openstack_index.replace('"', ''))) + 'first_compute', None) is None: + self._params_dict['first_compute'] = ( + "\"%s\"" %(first_compute)) + # Build resource items data += ''' # contrail-compute role. contrail_%s::contrail_compute::contrail_compute{contrail_compute: @@ -1399,22 +1845,23 @@ def puppet_add_storage_role(self, provision_params, last_res_added): contrail_openstack_mgmt_ip = provision_params["server_ip"] else: contrail_openstack_mgmt_ip = provision_params['openstack_mgmt_ip'] - if provision_params['server_ip'] in provision_params['roles']['storage']: + if provision_params['server_ip'] in provision_params['roles']['storage-compute']: data += ''' # contrail-storage role. 
contrail_%s::contrail_storage::contrail_storage{contrail_storage: - contrail_storage_repo_id => %s, + contrail_storage_repo_id => "%s", + contrail_num_storage_hosts => %s, contrail_storage_fsid => "%s", contrail_storage_virsh_uuid => "%s", - contrail_openstack_ip => "%s", + contrail_openstack_ip => "$contrail_openstack_ip", contrail_storage_mon_secret => "%s", contrail_storage_admin_key => "%s", contrail_storage_osd_bootstrap_key => "%s", contrail_storage_mon_hosts => \"''' % ( provision_params['puppet_manifest_version'], provision_params['storage_repo_id'], + provision_params['num_storage_hosts'], provision_params['storage_fsid'], provision_params['storage_virsh_uuid'], - contrail_openstack_mgmt_ip, provision_params['storage_mon_secret'], provision_params['admin_key'], provision_params['osd_bootstrap_key']) @@ -1440,22 +1887,23 @@ def puppet_add_storage_manager_role(self, provision_params, last_res_added): contrail_openstack_mgmt_ip = provision_params["server_ip"] else: contrail_openstack_mgmt_ip = provision_params['openstack_mgmt_ip'] - if provision_params['server_ip'] not in set(provision_params['roles']['storage']): + if provision_params['server_ip'] not in set(provision_params['roles']['storage-compute']): data += ''' # contrail-storage-manager role. 
contrail_%s::contrail_storage::contrail_storage{contrail_storage: - contrail_storage_repo_id => %s, + contrail_storage_repo_id => "%s", + contrail_num_storage_hosts => %s, contrail_storage_fsid => "%s", contrail_storage_virsh_uuid => "%s", - contrail_openstack_ip => "%s", + contrail_openstack_ip => "$contrail_openstack_ip", contrail_storage_mon_secret => "%s", contrail_storage_admin_key => "%s", contrail_storage_osd_bootstrap_key => "%s", contrail_storage_mon_hosts => \"''' % ( provision_params['puppet_manifest_version'], provision_params['storage_repo_id'], + provision_params['num_storage_hosts'], provision_params['storage_fsid'], provision_params['storage_virsh_uuid'], - contrail_openstack_mgmt_ip, provision_params['storage_mon_secret'], provision_params['admin_key'], provision_params['osd_bootstrap_key']) @@ -1483,8 +1931,8 @@ def puppet_add_storage_manager_role(self, provision_params, last_res_added): "webui": puppet_add_webui_role, "zookeeper": puppet_add_zk_role, "compute": puppet_add_compute_role, - "storage": puppet_add_storage_role, - "storage-mgr": puppet_add_storage_manager_role + "storage-compute": puppet_add_storage_role, + "storage-master": puppet_add_storage_manager_role } def provision_server(self, provision_params): @@ -1511,8 +1959,15 @@ def provision_server(self, provision_params): lines = f.readlines() if not server_line in lines: f.write(server_line) + cmd = "touch %s" %(self._site_manifest_file) + ret_code = subprocess.call(cmd, shell=True) return + if (provision_params['openstack_mgmt_ip'] == ''): + contrail_openstack_mgmt_ip = provision_params["server_ip"] + else: + contrail_openstack_mgmt_ip = provision_params['openstack_mgmt_ip'] + contrail_storage_cluster_network = self.get_control_network_mask(provision_params,contrail_openstack_mgmt_ip) # Storage params added to the top of the manifest file resource_data += '''$contrail_host_roles= [''' for role in provision_params['host_roles']: @@ -1520,10 +1975,16 @@ def provision_server(self, 
provision_params): resource_data = resource_data[:len(resource_data)-1]+']' resource_data += '''\n''' resource_data += '''$contrail_storage_num_osd= %s\n''' % (provision_params['storage_num_osd']) + resource_data += '''$contrail_storage_cluster_network= %s\n''' % (str(contrail_storage_cluster_network)) # Create resource to have repository configuration setup on the # target + resource_data += self._update_provision_start(provision_params) + + resource_data += self._repository_config(provision_params) + resource_data += self._update_kernel(provision_params) + # Always call common function for all the roles resource_data += self._roles_function_map["common"](self, provision_params) last_res_added =\ @@ -1535,7 +1996,7 @@ def provision_server(self, provision_params): # list array used to ensure that the role definitions are added # in a particular order roles = ['database', 'openstack', 'config', 'control', - 'collector', 'webui', 'zookeeper', 'compute', 'storage', 'storage-mgr'] + 'collector', 'webui', 'zookeeper', 'compute', 'storage-compute', 'storage-master'] for role in roles: if provision_params['roles'].get(role) and provision_params['server_ip'] in \ provision_params['roles'][role]: @@ -1546,6 +2007,11 @@ def provision_server(self, provision_params): if role == "zookeeper": last_res_added = "Contrail_$s::Contrail_common::Contrail-cfg-zk[\"contrail_cfg_zk\"]" %( provision_params['puppet_manifest_version']) + elif role == "storage-master" or role == "storage-compute": + storage_role = "storage" + last_res_added = ( + "Contrail_%s::Contrail_%s::Contrail_%s[\"contrail_%s\"]")\ + % (provision_params['puppet_manifest_version'], storage_role, storage_role, storage_role) else: last_res_added = ( "Contrail_%s::Contrail_%s::Contrail_%s[\"contrail_%s\"]")\ @@ -1556,6 +2022,11 @@ def provision_server(self, provision_params): if provision_params['execute_script']: resource_data += self.puppet_add_script_end_role(provision_params, last_res_added) + #TODO update 
last_res_added + #last_res_added = + + resource_data += self._update_provision_complete(provision_params, + last_res_added) # params_data and resource_data are compiled now. Add those to data and write # to manifest file for this server node. @@ -1575,6 +2046,8 @@ def provision_server(self, provision_params): lines = f.readlines() if not server_line in lines: f.write(server_line) + cmd = "touch %s" %(self._site_manifest_file) + ret_code = subprocess.call(cmd, shell=True) # end provision_server # class ServerMgrPuppet diff --git a/src/server_mgr_status.py b/src/server_mgr_status.py new file mode 100644 index 00000000..73c17a2c --- /dev/null +++ b/src/server_mgr_status.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +""" + Name : server_manager_status.py + Author : Abhay Joshi + Description : This file contains code that provides REST api interface to + configure, get and manage configurations for servers which + are part of the contrail cluster of nodes, interacting + together to provide a scalable virtual network system. + +""" + + +import pdb +import bottle +from bottle import route, run, request, abort, Bottle +from urlparse import urlparse, parse_qs +import time +import threading +from server_mgr_db import ServerMgrDb as db +from time import gmtime, strftime, localtime +from server_mgr_logger import ServerMgrlogger as ServerMgrlogger +from send_mail import send_mail +from server_mgr_defaults import * + +class ServerMgrStatusThread(threading.Thread): + + _smgr_log = None + _status_serverDb = None + + + ''' Class to run function that keeps validating the cobbler token + periodically (every 30 minutes) on a new thread. 
''' + _pipe_start_app = None + def __init__(self, timer, server, status_thread_config): + threading.Thread.__init__(self) + self._status_thread_config = status_thread_config + + + def run(self): + #create the logger + try: + self._smgr_log = ServerMgrlogger() + except: + print "Error Creating logger object" + + # Connect to the cluster-servers database + try: + self._status_serverDb = db( + "/etc/contrail_smgr/smgr_data.db") + except: + self._smgr_log.log(self._smgr_log.DEBUG, + "Error Connecting to Server Database %s" + % (self._args.smgr_base_dir+self._args.db_name)) + exit() + + #set the status related handlers + status_bottle_app = Bottle() + status_bottle_app.route('/server_status', 'PUT', self.put_server_status) + + try: + bottle.run(status_bottle_app, + host=self._status_thread_config['listen_ip'], + port=self._status_thread_config['listen_port']) + except Exception as e: + # cleanup gracefully + exit() + + def put_server_status(self): + print "put-status" + #query_args = parse_qs(urlparse(bottle.request.url).query, + #keep_blank_values=True) + #match_key, match_value = query_args.popitem() + server_id = request.query['server_id'] + server_state = request.query['state'] + body = request.body.read() + server_data = {} + server_data['id'] = server_id + server_data['status'] = server_state + try: + message = server_id + ' ' + server_state + \ + strftime(" (%Y-%m-%d %H:%M:%S)", localtime()) + self._smgr_log.log(self._smgr_log.DEBUG, "Server status Data %s" % server_data) + + servers = self._status_serverDb.modify_server( + server_data) + + if server_state in email_events: + self.send_status_mail(server_id, message, message) + + except Exception as e: +# self.log_trace() + self._smgr_log.log(self._smgr_log.ERROR, "Error adding to db %s" % repr(e)) + abort(404, repr(e)) + + def get_email_list(self, email): + email_to = [] + if not email: + return email_to + if email.startswith('[') and email.endswith(']'): + email_to = eval(email) + else: + email_to = [s.strip() 
for s in email.split(',')] + return email_to + # end get_email_list + + def send_status_mail(self, server_id, event, message): + # Get server entry and find configured e-mail + servers = self._status_serverDb.get_server( + {"id" : server_id}, detail=True) + if not servers: + msg = "No server found with server_id " + server_id + self._smgr_log.log(self._smgr_log.ERROR, msg) + return -1 + server = servers[0] + email_to = [] + if 'email' in server and server['email']: + email_to = self.get_email_list(server['email']) + else: + # Get cluster entry to find configured e-mail + if 'cluster_id' in server and server['cluster_id']: + cluster_id = server['cluster_id'] + cluster = self._status_serverDb.get_cluster( + {"id" : cluster_id}, detail=True) + if cluster and 'email' in cluster[0] and cluster[0]['email']: + email_to = self.get_email_list(cluster[0]['email']) + else: + self._smgr_log.log(self._smgr_log.DEBUG, + "cluster or server doesn't configured for email") + return 0 + else: + self._smgr_log.log(self._smgr_log.DEBUG, "server not associated with a cluster") + return 0 + send_mail(event, message, '', email_to, + self._status_thread_config['listen_ip'], '25') + msg = "An email is sent to " + ','.join(email_to) + " with content " + message + self._smgr_log.log(self._smgr_log.DEBUG, msg) + # send_status_mail + diff --git a/src/sm-config.ini b/src/sm-config.ini new file mode 100644 index 00000000..cb864104 --- /dev/null +++ b/src/sm-config.ini @@ -0,0 +1,14 @@ +[SERVER-MANAGER] +listen_ip_addr = 127.0.0.1 +listen_port = 9001 +database_name = smgr_data.db +server_manager_base_dir = /etc/contrail_smgr/ +html_root_dir = /var/www/html/ +cobbler_ip_address = 127.0.0.1 +cobbler_port = +cobbler_username = cobbler +cobbler_password = cobbler +ipmi_username = ADMIN +ipmi_password = ADMIN +ipmi_type = ipmilan +puppet_dir = /etc/puppet/ diff --git a/src/smgr_config.ini b/src/smgr_config.ini deleted file mode 100644 index 07f2da6b..00000000 --- a/src/smgr_config.ini +++ /dev/null @@ 
-1,14 +0,0 @@ -[SERVER-MANAGER] -listen_ip_addr = 127.0.0.1 -listen_port = 9001 -db_name = smgr_data.db -smgr_base_dir = /etc/contrail_smgr/ -html_root_dir = /var/www/html/ -cobbler_ip = 127.0.0.1 -cobbler_port = -cobbler_user = testing -cobbler_passwd = testing -power_user = ADMIN -power_pass = ADMIN -power_type = ipmilan -puppet_dir = /etc/puppet/ diff --git a/src/tags.ini b/src/tags.ini new file mode 100644 index 00000000..3162bdf2 --- /dev/null +++ b/src/tags.ini @@ -0,0 +1,6 @@ +[TAGS] +tag1 = datacenter +tag2 = floor +tag3 = hall +tag4 = rack +tag5 = user_tag diff --git a/src/third_party/server_post_install.py b/src/third_party/server_post_install.py index f82782c5..faaf03a9 100755 --- a/src/third_party/server_post_install.py +++ b/src/third_party/server_post_install.py @@ -23,7 +23,7 @@ #[root@a3s17 modules]# more /var/log/cobbler/install.log #system a3s10 10.84.16.3 stop Thu Jun 5 14:37:58 2014 -_DEF_SMGR_PORT=9001 +_DEF_SMGR_STATUS_PORT=9002 def register(): # trigger type @@ -54,10 +54,10 @@ def run(api, args, logger): name = args[1] # name of system or profile server_ip = args[2] # ip or "?" ip = socket.gethostbyname(socket.gethostname()) - object = 'status' - url_str = object + "?" + "server_id=" + name + object = 'server_status' + url_str = object + "?" 
+ "server_id=" + name + "&state=reimage_completed" payload = 'reimage completed' - send_REST_request(ip, '9001', url_str, payload) + send_REST_request(ip, str(_DEF_SMGR_STATUS_PORT), url_str, payload) fd = open("/var/log/cobbler/contrail_install.log","a+") fd.write("\n%s\t%s\t%s\tcompleted\t%s\n" % (objtype,name,ip,time.asctime(time.localtime(time.time())))) fd.write("url:%s, payload:%s\n" % (url_str, payload)) diff --git a/src/third_party/server_pre_install.py b/src/third_party/server_pre_install.py index 87f6fc6e..a3a9350e 100755 --- a/src/third_party/server_pre_install.py +++ b/src/third_party/server_pre_install.py @@ -14,7 +14,7 @@ from StringIO import StringIO import json import socket - +import urllib plib = distutils.sysconfig.get_python_lib() mod_path="%s/cobbler" % plib @@ -23,7 +23,7 @@ #[root@a3s17 modules]# more /var/log/cobbler/install.log #system a3s10 10.84.16.3 start 1401606565.15 -_DEF_SMGR_PORT=9001 +_DEF_SMGR_STATUS_PORT=9002 def register(): # trigger type @@ -54,10 +54,10 @@ def run(api, args, logger): name = args[1] # name of system or profile server_ip = args[2] # ip or "?" ip = socket.gethostbyname(socket.gethostname()) - object = 'status' - url_str = object + "?" + "server_id=" + name + object = 'server_status' + url_str = object + "?" 
+ "server_id=" + name + "&state=reimage_started" payload = 'reimage start' - send_REST_request(ip, '9001', url_str, payload) + send_REST_request(ip, str(_DEF_SMGR_STATUS_PORT), url_str, payload) fd = open("/var/log/cobbler/contrail_install.log","a+") fd.write("\n%s\t%s\t%s\tstart\t%s\n" % (objtype,name,ip,time.asctime(time.localtime(time.time())))) fd.write("url:%s, payload:%s\n" % (url_str, payload)) diff --git a/src/utils/smgr_upgrade_script.sh b/src/utils/smgr_upgrade_script.sh new file mode 100644 index 00000000..20fb94b9 --- /dev/null +++ b/src/utils/smgr_upgrade_script.sh @@ -0,0 +1,20 @@ +#Upgrade script to upgrade the server-manager RPM(Centos) +#Usage: ./smgr_upgrade_script.sh +#!/bin/sh +set -x -v +mkdir -p /contrail-smgr-save +cp /etc/cobbler/dhcp.template /contrail-smgr-save +cp /etc/cobbler/named.template /contrail-smgr-save +cp /etc/cobbler/settings /contrail-smgr-save +cp /etc/cobbler/zone.template /contrail-smgr-save +cp -r /etc/cobbler/zone_templates /contrail-smgr-save +service contrail-server-manager stop +yum -y remove contrail-server-manager +yum -y localinstall $1 +cp /contrail-smgr-save/dhcp.template /etc/cobbler/dhcp.template +cp /contrail-smgr-save/named.template /etc/cobbler/named.template +cp /contrail-smgr-save/settings /etc/cobbler/settings +cp /contrail-smgr-save/zone.template /etc/cobbler/zone.template +cp -r /contrail-smgr-save/zone_templates /etc/cobbler/ +service contrail-server-manager start + diff --git a/src/vmware/esxi_contrailvm.py b/src/vmware/esxi_contrailvm.py index d56c220c..e7dc479d 100644 --- a/src/vmware/esxi_contrailvm.py +++ b/src/vmware/esxi_contrailvm.py @@ -92,17 +92,17 @@ def __init__(self, vm_params): self.uplink_vswitch = vm_params['uplink_vswitch'] self.server = vm_params['server'] self.username = vm_params['username'] - self.passwd = vm_params['passwd'] + self.password = vm_params['password'] self.thindisk = vm_params['thindisk'] self.vm_domain = vm_params['domain'] self.vm_id = 0 self.smgr_ip = 
vm_params['smgr_ip'] self.vm_server = vm_params['vm_server'] - self.vm_passwd = vm_params['vm_passwd'] + self.vm_password = vm_params['vm_password'] self.vm_deb = vm_params['vm_deb'] self._create_networking() print self._create_vm() - print self._install_contrailvm_pkg(self.eth0_ip, "root", self.vm_passwd, self.vm_domain, self.vm_server, + print self._install_contrailvm_pkg(self.eth0_ip, "root", self.vm_password, self.vm_domain, self.vm_server, self.vm_deb, self.smgr_ip) #end __init__ @@ -180,7 +180,7 @@ def _create_vm(self): self._create_vmx_file(self.vm, self.vmdk, self.eth0_mac, self.eth0_pg, self.eth1_pg) # open ssh session - ssh_session = ssh(self.server, self.username, self.passwd) + ssh_session = ssh(self.server, self.username, self.password) vm_store = self.datastore+"/"+self.vm+"/" get_vmid = 0 get_vmid_cmd = ("vim-cmd vmsvc/getallvms | grep %s | awk \'{print $1}\'") % (self.vm) @@ -195,7 +195,7 @@ def _create_vm(self): # open sftp and transfer .vmx and thin disk and close the channel transport = paramiko.Transport((self.server, 22)) - transport.connect(username=self.username, password=self.passwd) + transport.connect(username=self.username, password=self.password) sftp = paramiko.SFTPClient.from_transport(transport) dst_vmx = self.vm+".vmx" thin_vmdk = self.vmdk+"-disk.vmdk" @@ -245,7 +245,7 @@ def _create_vm(self): def _unregister_vm(self): # open ssh session - ssh_session = ssh(self.server, self.username, self.passwd) + ssh_session = ssh(self.server, self.username, self.password) unregister_vm_cmd = ("vim-cmd vmsvc/unregister %s") % (self.vm_id) out, err = execute_cmd_out(ssh_session, unregister_vm_cmd) ssh_session.close() @@ -253,7 +253,7 @@ def _unregister_vm(self): def _power_off_vm(self): # open ssh session - ssh_session = ssh(self.server, self.username, self.passwd) + ssh_session = ssh(self.server, self.username, self.password) power_off_vm_cmd = ("vim-cmd vmsvc/power.off %s") % (self.vm_id) out, err = execute_cmd_out(ssh_session, power_off_vm_cmd) 
ssh_session.close() @@ -261,7 +261,7 @@ def _power_off_vm(self): def _power_on_vm(self): # open ssh session - ssh_session = ssh(self.server, self.username, self.passwd) + ssh_session = ssh(self.server, self.username, self.password) power_on_vm_cmd = ("vim-cmd vmsvc/power.on %s") % (self.vm_id) out, err = execute_cmd_out(ssh_session, power_on_vm_cmd) ssh_session.close() @@ -269,7 +269,7 @@ def _power_on_vm(self): def _power_reset_vm(self): # open ssh session - ssh_session = ssh(self.server, self.username, self.passwd) + ssh_session = ssh(self.server, self.username, self.password) power_reset_vm_cmd = ("vim-cmd vmsvc/power.reset %s") % (self.vm_id) out, err = execute_cmd_out(ssh_session, power_reset_vm_cmd) ssh_session.close() @@ -277,7 +277,7 @@ def _power_reset_vm(self): def _create_networking(self): # open ssh session - ssh_session = ssh(self.server, self.username, self.passwd) + ssh_session = ssh(self.server, self.username, self.password) if ssh_session is None: return @@ -407,12 +407,12 @@ def _install_contrailvm_pkg(self, ip, user, passwd, domain, server , 'uplink_vswitch':'vSwitch0', 'server':"127.0.0.1", 'username':"root", - 'passwd':"c0ntrail123", + 'password':"c0ntrail123", 'thindisk':"/tmp/ContrailVM-disk.vmdk", 'domain':'englab.juniper.net', 'smgr_ip':'10.204.217.59', 'vm_server': 'contrail-vm', - 'vm_passwd': 'c0ntrail123', + 'vm_password': 'c0ntrail123', 'vm_deb': '/root/contrail-install-packages_1.05-5440~havana_all.deb' }